2024年 最新 Python 调用 ChatGPT 实战教程

文章目录
一、前言
二、具体分析
  1、简版程序
  2、多轮对话
  3、流式输出
  4、返回消耗的 token

一、前言
这个之前经常用到，简单记录一下。注意目前 ChatGPT 更新了，这个是最新版的；如果不是最新版的，请自行升级。
二、具体分析
openai 安装
pip install openai

1、简版程序
该版本只有一轮
from openai import OpenAI

api_key = "your apikey"  # replace with your real OpenAI API key


def openai_reply(content):
    """Send a single user message to the chat API and return the reply text.

    Args:
        content: the user's input text for this one-shot request.

    Returns:
        The assistant's reply (str). No conversation history is kept,
        so each call is an independent, single-turn exchange.
    """
    client = OpenAI(api_key=api_key)
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": content,
            }
        ],
        model="gpt-4-1106-preview",
    )
    # The API returns a list of choices; take the first one's message text.
    return chat_completion.choices[0].message.content


if __name__ == "__main__":
    while True:
        content = input("人类:")
        text1 = openai_reply(content)
        print("AI:" + text1)

# 2、多轮对话
这个版本有多轮对话，核心是加入记忆。
from openai import OpenAI

api_key = "your apikey"  # replace with your real OpenAI API key


def openai_replys(memory):
    """Send the full conversation history and return the new reply.

    Args:
        memory: list of message dicts ({"role": ..., "content": ...})
            holding the whole conversation so far. The assistant's reply
            is appended to this list in place, so the caller's history
            stays up to date automatically.

    Returns:
        The assistant's reply text (str) for this turn.
    """
    client = OpenAI(api_key=api_key)
    chat_completion = client.chat.completions.create(
        messages=memory,  # full history = the model's "memory"
        model="gpt-4-1106-preview",
    )
    reply = chat_completion.choices[0].message.content
    # Record the assistant turn so the next request sees it as context.
    memory.append({"role": "assistant", "content": reply})
    return reply


if __name__ == "__main__":
    memory = []  # conversation history across turns
    while True:
        content = input("人类:")
        memory.append({"role": "user", "content": content})
        text1 = openai_replys(memory)
        print("AI:" + text1)

# 程序输出
3、流式输出
这个版本有了流式输出，让你看起来不是卡住了的样子。
from openai import OpenAI

api_key = "your apikey"  # replace with your real OpenAI API key


def openai_stream(memory):
    """Start a streaming chat completion over the conversation history.

    Args:
        memory: list of message dicts holding the conversation so far.

    Returns:
        The streaming iterator from the SDK; each chunk carries an
        incremental piece of the reply in ``choices[0].delta.content``.
    """
    client = OpenAI(api_key=api_key)
    stream = client.chat.completions.create(
        messages=memory,  # full history = the model's "memory"
        model="gpt-4-1106-preview",
        stream=True,  # ask the server to send the reply incrementally
    )
    return stream


if __name__ == "__main__":
    memory = []
    while True:
        content = input("人类:")
        memory.append({"role": "user", "content": content})
        stream = openai_stream(memory)
        print("AI:", end="")
        aitext = ""
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                # Print each fragment as it arrives, without a newline,
                # so the reply appears to "type itself" on screen.
                print(delta, end="")
                aitext += delta
            else:
                # A None delta marks the end of the stream: finish the line.
                print()
        # Save the complete reply so the next turn has full context.
        memory.append({"role": "assistant", "content": aitext})

# 4、返回消耗的token
返回消耗的token
token 类型及解释：
- completion_tokens：输出 token
- prompt_tokens：输入 token
- total_tokens：全部 token
from openai import OpenAI
import tiktoken


def calToken(memory, aitext, model="gpt-3.5-turbo"):
    """Estimate token usage for one exchange using tiktoken locally.

    Args:
        memory: list of message dicts sent as the prompt.
        aitext: the assistant's reply text (the completion).
        model: model name used to pick the right tokenizer encoding.

    Returns:
        dict with ``completion_tokens``, ``prompt_tokens`` and
        ``total_tokens`` counts, mirroring the API's ``usage`` object.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model name: fall back to the generic chat encoding.
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    completion_tokens = len(encoding.encode(aitext))
    prompt_tokens = num_tokens_from_messages(memory, model=model)
    token_count = completion_tokens + prompt_tokens
    return {
        "completion_tokens": completion_tokens,
        "prompt_tokens": prompt_tokens,
        "total_tokens": token_count,
    }
def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
    """Return the number of tokens used by a list of messages.

    Approximates the prompt-token count the API reports, by encoding
    each message's content and adding fixed per-message overhead.

    Args:
        messages: list of {"role": ..., "content": ...} dicts.
        model: model name used to pick the tokenizer encoding.

    Returns:
        Estimated prompt token count (int).
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model name: fall back to the generic chat encoding.
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    tokens_per_message = 8  # every message follows <|start|>{role/name}\n{content}<|end|>\n
    tokens_per_name = -1  # if there's a name, the role is omitted
    num_tokens = 0
    for message in messages:
        for key, value in message.items():
            if key == "content":
                num_tokens += len(encoding.encode(value))
            if key == "role" and value == "user":
                num_tokens += tokens_per_message
    num_tokens += tokens_per_name  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


api_key = "your apikey"  # replace with your real OpenAI API key
def openai_chat(memory):
    """Run one chat turn, print the server-reported usage, return the reply.

    Args:
        memory: list of message dicts holding the conversation so far.

    Returns:
        The assistant's reply text (str). Also prints the ``usage``
        object the API returns (the authoritative token counts).
    """
    client = OpenAI(api_key=api_key)
    completion = client.chat.completions.create(
        messages=memory,  # full history = the model's "memory"
        model="gpt-4-1106-preview",
    )
    # usage holds the server-side token accounting for this request.
    print("total Token " + str(completion.usage))
    return completion.choices[0].message.content


if __name__ == "__main__":
    memory = []  # conversation history across turns
    while True:
        content = input("人类:")
        memory.append({"role": "user", "content": content})  # record the user turn
        aitext = openai_chat(memory)
        print("AI:" + aitext)
        # Compare our local tiktoken estimate with the server's numbers.
        cocus = calToken(memory, aitext, model="gpt-4-1106-preview")
        print("消耗token:" + str(cocus))
        memory.append({"role": "assistant", "content": aitext})