
How do I format markdown chatgpt response in tkinter frame python?


Problem background:

ChatGPT sometimes responds in Markdown. Sometimes the response contains ** **, which means the text in between should be bold, and ### text ###, which means that text is a heading. I want to parse this correctly and display it properly in tkinter: if a span is bold or a heading, it should be rendered as bold or as a heading in the tkinter widget. How can I do this?

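For background, tkinter's Text widget has no Markdown support; styling is applied by attaching tags to ranges of text, and that tag mechanism is what the solution further below relies on. A minimal, self-contained sketch of how tags work:

import tkinter as tk

root = tk.Tk()
text = tk.Text(root, wrap="word", font=("Arial", 12))
text.pack(fill="both", expand=True)

# A tag bundles formatting options; any text inserted with that tag gets the style.
text.tag_configure("bold", font=("Arial", 12, "bold"))
text.tag_configure("heading", font=("Arial", 16, "bold"))

text.insert("end", "A heading\n", "heading")
text.insert("end", "Normal text with one ")
text.insert("end", "bold", "bold")
text.insert("end", " word.\n")

root.mainloop()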

My code:

import tkinter as tk
from tkinter import ttk
from datetime import datetime
import openai
import json
import requests

history = []

# Create a function to use ChatGPT 3.5 turbo to answer a question based on the prompt
def get_answer_from_chatgpt(prompt, historyxx):
    global history
    openai.api_key = "xxxxxxx"
    append_to_chat_log(message="\n\n\n")
    append_to_chat_log("Chatgpt")
    print("Trying")
    messages = [
        {"role": "user", "content": prompt}
    ]
    try:
        stream = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            stream=True,
        )
        for chunk in stream:
            chunk = chunk.choices[0].delta.content
            chunk = str(chunk)
            if chunk != "None":
                append_to_chat_log(message=chunk)
        append_to_chat_log(message="\n\n\n")
        print("Streaming complete")
    except Exception as e:
        print(e)
        return "Sorry, an error occurred while processing your request."

# Create a function to use OpenAI to answer a question based on the search results
def append_to_chat_log(sender=None, message=None):
    chat_log.config(state="normal")
    if sender:
        chat_log.insert("end", f"{sender}:\n", "sender")
    if message:
        chat_log.insert("end", message)
    chat_log.config(state="disabled")
    chat_log.see("end")
    chat_log.update()

def send_message(event=None):
    global history
    message = message_entry.get(1.0, "end-1c")
    message = message.strip()
    message_entry.delete(1.0, tk.END)
    message_entry.update()
    if not message:
        pass
    else:
        append_to_chat_log("User", message)
        history.append(("user", message))
        if len(history) > 4:
            history = history[-4:]
        print(message)
        response = get_answer_from_chatgpt(message, history)
        history.append(("assistant", response))

root = tk.Tk()
root.title("Chat")
# Maximize the window
root.attributes('-zoomed', True)
chat_frame = tk.Frame(root)
chat_frame.pack(expand=True, fill=tk.BOTH)
chat_log = tk.Text(chat_frame, state='disabled', wrap='word', width=70, height=30, font=('Arial', 12), highlightthickness=0, borderwidth=0)
chat_log.pack(side=tk.LEFT, padx=(500, 0), pady=10)
message_entry = tk.Text(root, padx=17, insertbackground='white', width=70, height=1, spacing1=20, spacing3=20, font=('Open Sans', 14))
message_entry.pack(side=tk.LEFT, padx=(500, 0), pady=(0, 70))  # Adjust pady to move it slightly above the bottom
message_entry.mark_set("insert", "%d.%d" % (0, 0))
message_entry.bind("<Return>", send_message)
root.mainloop()
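The code above inserts each streamed chunk as-is, so Markdown markers such as ** and ### show up literally in the Text widget. The solution below buffers the stream and splits the buffer on those markers with str.partition; for reference, str.partition (standard Python) behaves like this:

# str.partition splits at the first occurrence of the separator and returns
# a 3-tuple (before, separator, after); if the separator is absent it
# returns (whole_string, '', '').
buffer = "plain text **bold text** more plain text"
pre, sep, post = buffer.partition("**")
print(repr(pre))   # 'plain text '
print(repr(post))  # 'bold text** more plain text'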

Solution:

I solved my own question:

import tkinter as tk
from datetime import datetime
import openai

history = []

# Create a function to use ChatGPT 3.5 turbo to answer a question based on the prompt
def get_answer_from_chatgpt(prompt, historyxx):
    global history
    openai.api_key = "xxxx"
    append_to_chat_log(message="\n\n\n")
    append_to_chat_log("Chatgpt")
    print("Trying")
    messages = [
        {"role": "user", "content": prompt}
    ]
    try:
        stream = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            stream=True,
        )
        buffer = ""
        heading = False
        bold = False
        for chunk in stream:
            chunk = chunk.choices[0].delta.content
            chunk = str(chunk)
            if chunk != "None":
                buffer += chunk
                # Each "**" toggles bold on/off; the text before the marker is
                # flushed with the style that was active up to that point.
                if "**" in buffer:
                    while "**" in buffer:
                        pre, _, post = buffer.partition("**")
                        append_to_chat_log(message=pre, bold=bold)
                        bold = not bold
                        buffer = post
                # Each "###" toggles heading on/off in the same way.
                if "###" in buffer:
                    while "###" in buffer:
                        pre, _, post = buffer.partition("###")
                        append_to_chat_log(message=pre, heading=heading)
                        heading = not heading
                        buffer = post
                else:
                    # No pending heading marker: flush the buffer with the current style.
                    append_to_chat_log(message=buffer, bold=bold)
                    buffer = ""
        append_to_chat_log(message="\n\n\n")
        print("Streaming complete")
    except Exception as e:
        print(e)
        return "Sorry, an error occurred while processing your request."

def append_to_chat_log(sender=None, message=None, bold=False, heading=False):
    chat_log.config(state="normal")
    if sender:
        chat_log.insert("end", f"{sender}:\n", "sender")
    if message:
        if bold:
            chat_log.insert("end", message, "bold")
        elif heading:
            chat_log.insert("end", message, "heading")
        else:
            chat_log.insert("end", message)
    chat_log.config(state="disabled")
    chat_log.see("end")
    chat_log.update()

def send_message(event=None):
    global history
    message = message_entry.get(1.0, "end-1c")
    message = message.strip()
    message_entry.delete(1.0, tk.END)
    message_entry.update()
    if not message:
        pass
    else:
        append_to_chat_log("User", message)
        history.append(("user", message))
        if len(history) > 4:
            history = history[-4:]
        print(message)
        response = get_answer_from_chatgpt(message, history)
        history.append(("assistant", response))

root = tk.Tk()
root.title("Chat")
# Maximize the window
root.attributes('-zoomed', True)
chat_frame = tk.Frame(root)
chat_frame.pack(expand=True, fill=tk.BOTH)
chat_log = tk.Text(chat_frame, state='disabled', wrap='word', width=70, height=30, font=('Arial', 12), highlightthickness=0, borderwidth=0)
# Tags carry the formatting; inserting text with a tag name applies that tag's font.
chat_log.tag_configure("sender", font=('Arial', 12, 'bold'))
chat_log.tag_configure("bold", font=('Arial', 12, 'bold'))
chat_log.tag_configure("heading", font=('Arial', 16, 'bold'))
chat_log.pack(side=tk.LEFT, padx=(500, 0), pady=10)
message_entry = tk.Text(root, padx=17, insertbackground='white', width=70, height=1, spacing1=20, spacing3=20, font=('Open Sans', 14))
message_entry.pack(side=tk.LEFT, padx=(500, 0), pady=(0, 70))  # Adjust pady to move it slightly above the bottom
message_entry.mark_set("insert", "%d.%d" % (0, 0))
message_entry.bind("<Return>", send_message)
root.mainloop()
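As a follow-up, the same tag-based idea can be tried without an API key by running it over a fixed string. The sketch below is only illustrative and does not come from the answer above; render_markdown and current_tag are made-up helper names, and the marker handling is simplified (each marker just toggles a style, so this is not a full Markdown parser):

import tkinter as tk

def current_tag(bold, heading):
    # Return the tag tuple for the currently active style (empty tuple = plain text).
    if bold:
        return ("bold",)
    if heading:
        return ("heading",)
    return ()

def render_markdown(text_widget, raw):
    # Minimal renderer: each "**" toggles bold, each "###" toggles heading.
    bold = False
    heading = False
    rest = raw
    while rest:
        markers = [(rest.find(m), m) for m in ("**", "###")]
        markers = [(i, m) for i, m in markers if i != -1]
        if not markers:
            text_widget.insert("end", rest, *current_tag(bold, heading))
            break
        idx, marker = min(markers)  # nearest marker in the remaining text
        text_widget.insert("end", rest[:idx], *current_tag(bold, heading))
        if marker == "**":
            bold = not bold
        else:
            heading = not heading
        rest = rest[idx + len(marker):]

root = tk.Tk()
log = tk.Text(root, wrap="word", font=("Arial", 12))
log.tag_configure("bold", font=("Arial", 12, "bold"))
log.tag_configure("heading", font=("Arial", 16, "bold"))
log.pack(fill="both", expand=True)

render_markdown(log, "### Heading ###\nSome text with **bold words** in it.\n")
root.mainloop()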
