
Simple Multi-Turn Q&A with the iFlytek Spark Model and the Alibaba Cloud Tongyi Qianwen Model


iFlytek Spark Cognitive Model:

Open the iFlytek Spark homepage (讯飞星火认知大模型, xfyun.cn), click "API免费使用" (use the API for free), and claim the free personal package.

This takes you to the iFlytek Open Platform (讯飞开放平台, xfyun.cn). Open the console, create a new application, and you can then obtain its APPID, APISecret, and APIKey.

Multi-turn calling code: the listing is split into two parts, a WebSocket wrapper module (saved as sparkAPI.py) and a driver script that imports it. Fill the APPID, APISecret, and APIKey you just obtained into the driver's main section, then run it to hold a multi-turn conversation.

# coding: utf-8
# sparkAPI.py -- WebSocket wrapper for the iFlytek Spark chat API
import _thread as thread
import base64
import hashlib
import hmac
import json
import ssl
from datetime import datetime
from time import mktime
from urllib.parse import urlparse, urlencode
from wsgiref.handlers import format_date_time

import websocket

# Accumulates the assistant's streamed reply for the current turn;
# the driver script reads it back as sparkAPI.answer.
answer = ""


class Ws_Param(object):
    # Initialization
    def __init__(self, APPID, APIKey, APISecret, gpt_url):
        self.APPID = APPID
        self.APIKey = APIKey
        self.APISecret = APISecret
        self.host = urlparse(gpt_url).netloc
        self.path = urlparse(gpt_url).path
        self.gpt_url = gpt_url

    # Build the authenticated websocket URL
    def create_url(self):
        # RFC 1123 formatted timestamp
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))
        # String to sign
        signature_origin = "host: " + self.host + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + self.path + " HTTP/1.1"
        # Sign it with HMAC-SHA256
        signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()
        signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')
        authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'
        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
        # Collect the auth parameters into a dict
        v = {
            "authorization": authorization,
            "date": date,
            "host": self.host
        }
        # Append them as a query string to get the final URL
        url = self.gpt_url + '?' + urlencode(v)
        # When debugging, print the URL here and compare it with the official demo
        return url


# Websocket error handler
def on_error(ws, error):
    print("### error:", error)


# Websocket close handler
def on_close(ws, *args):
    print("### closed ###")


# Websocket open handler: send the request from a background thread
def on_open(ws):
    thread.start_new_thread(run, (ws,))


def run(ws, *args):
    data = json.dumps(gen_params(appid=ws.appid, messages=ws.messages, domain=ws.domain))
    ws.send(data)


# Websocket message handler: print and accumulate the streamed answer
def on_message(ws, message):
    global answer
    data = json.loads(message)
    code = data['header']['code']
    if code != 0:
        print(f'Request error: {code}, {data}')
        ws.close()
    else:
        choices = data["payload"]["choices"]
        status = choices["status"]
        content = choices["text"][0]["content"]
        answer += content
        print(content, end='')
        if status == 2:  # status 2 marks the last frame of the reply
            ws.close()


def gen_params(appid, messages, domain):
    # Build the request body; the full message history is sent so the
    # model can see the previous turns of the conversation.
    data = {
        "header": {
            "app_id": appid,
            "uid": "1234",
        },
        "parameter": {
            "chat": {
                "domain": domain,
                "temperature": 0.5,
                "max_tokens": 4096,
                "auditing": "default",
            }
        },
        "payload": {
            "message": {
                "text": messages
            }
        }
    }
    return data


def main(appid, api_secret, api_key, gpt_url, domain, messages):
    wsParam = Ws_Param(appid, api_key, api_secret, gpt_url)
    websocket.enableTrace(False)
    wsUrl = wsParam.create_url()
    ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error,
                                on_close=on_close, on_open=on_open)
    ws.appid = appid
    ws.messages = messages  # send the full history so earlier turns are part of the context
    ws.domain = domain
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
Save the code above as sparkAPI.py. The driver script below imports it, maintains the conversation history, and trims the history when it grows too long:

import sparkAPI as LLM_API

messages = []


# Append a new message to the history; it can be a user question or a model answer
def add_message(role, content):
    json_con = {
        "role": role,
        "content": content
    }
    messages.append(json_con)


# Rough length of the history; the number of tokens sent to the model is limited
def get_messages_length(messages):
    length = sum(len(message["content"]) for message in messages)
    return length


# Drop the oldest messages until the history fits within the limit
def check_messages_length(messages):
    while get_messages_length(messages) > 8000:
        messages.pop(0)
    return messages


if __name__ == "__main__":
    # API credentials and the model endpoint URL
    appid = "1178223e"
    api_secret = "xxx"
    api_key = "xxx"
    gpt_url = "xxx"  # Spark WebSocket endpoint for the chosen model version
    domain = "generalv3.5"
    messages.clear()
    while True:  # conversation loop
        user_query = input("\nUser: ")
        # Add the user's question to the history
        add_message("user", user_query)
        # Trim the history if needed, then send it to the model
        checked_messages = check_messages_length(messages)
        LLM_API.answer = ""
        print("\nAssistant:", end="")
        LLM_API.main(appid, api_secret, api_key, gpt_url, domain, checked_messages)
        # Add the model's answer to the history
        add_message("assistant", LLM_API.answer)
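Hard-coding the three credentials in the driver works for a quick test, but it makes them easy to leak. Below is a minimal sketch of reading them from environment variables instead; the variable names (SPARK_APPID and so on) are my own convention, not anything defined by iFlytek:

import os

# Hypothetical variable names; export them in your shell first, e.g.
#   export SPARK_APPID=1178223e
appid = os.environ["SPARK_APPID"]
api_secret = os.environ["SPARK_API_SECRET"]
api_key = os.environ["SPARK_API_KEY"]
gpt_url = os.environ["SPARK_GPT_URL"]  # the Spark WebSocket endpoint for your model version
domain = os.environ.get("SPARK_DOMAIN", "generalv3.5")

The rest of the driver's main block stays unchanged.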

Sample run output:

Alibaba Cloud Tongyi Qianwen (Qwen) model:

Open the DashScope page (模型服务灵积 DashScope, aliyun.com), click "立即开通" (activate now), then go to the console to view your API key.
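Before wiring up the multi-turn loop below, a quick single-turn call can confirm that the key works. This is only a sanity-check sketch using the same Generation.call interface as the full example; 'xxx' again stands for your own key:

from http import HTTPStatus

import dashscope
from dashscope import Generation

dashscope.api_key = 'xxx'  # the key from the DashScope console

# One-shot request: a single user message, no history
resp = Generation.call(
    Generation.Models.qwen_turbo,
    messages=[{'role': 'user', 'content': 'Hello!'}],
    result_format='message',
)
if resp.status_code == HTTPStatus.OK:
    print(resp.output.choices[0]['message']['content'])
else:
    print(resp.code, resp.message)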

Multi-turn conversation code:

Replace the API_KEY placeholder with your own key and the script is ready to run.

from http import HTTPStatus

import dashscope
from dashscope import Generation
from dashscope.api_entities.dashscope_response import Role

dashscope.api_key = 'xxx'  # set your API key


def conversation_with_messages():
    messages = [{'role': Role.SYSTEM, 'content': 'You are a helpful assistant.'}]
    # Loop to hold a multi-turn conversation
    while True:
        prompt = input("USER:")
        # Append the user's question for this turn
        messages.append({'role': Role.USER, 'content': prompt})
        response = Generation.call(
            Generation.Models.qwen_turbo,  # choose the model to call
            messages=messages,
            result_format='message',  # set the result to be "message" format
        )
        if response.status_code == HTTPStatus.OK:
            for choice in response.output.choices:
                print(f"{choice['message']['role']}: {choice['message']['content']}")
                # Append the model's reply to messages
                messages.append({'role': choice['message']['role'],
                                 'content': choice['message']['content']})
        else:
            print('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
                response.request_id, response.status_code,
                response.code, response.message
            ))
            exit()


if __name__ == '__main__':
    conversation_with_messages()
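One difference from the Spark driver above is that this loop never trims messages, so a long session will eventually run past the model's context limit. Below is a small sketch of the same character-count trimming idea, adapted so the system prompt at index 0 is always kept; the 8000-character budget is a placeholder, not a documented limit:

# Drop the oldest user/assistant turns (but keep the system prompt at index 0)
# until the rough character count of the history fits the budget.
def trim_messages(messages, max_chars=8000):
    def total_chars(msgs):
        return sum(len(m['content']) for m in msgs)
    while total_chars(messages) > max_chars and len(messages) > 1:
        messages.pop(1)  # index 0 is the system prompt
    return messages

Calling trim_messages(messages) right before Generation.call keeps the history bounded without changing anything else in the loop.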

Sample run output:

The above is only a simple, direct way of calling these two families of large models; for a deeper dive, see the articles by more experienced authors. Good luck, and thanks for reading!
