xwk123 committed on
Commit
692287e
·
verified ·
1 Parent(s): 339c335

Delete agent_for_ui/chatgpt.py

Browse files
Files changed (1) hide show
  1. agent_for_ui/chatgpt.py +0 -168
agent_for_ui/chatgpt.py DELETED
@@ -1,168 +0,0 @@
1
- import requests
2
- import os
3
- import openai
4
- import backoff
5
- import requests
6
- import time
7
- import json
8
- completion_tokens = prompt_tokens = 0
9
-
10
- # api_key = "sk-REDACTED"  # NOTE(review): a live-looking secret was committed here; it has been redacted — rotate the key
11
- # if api_key != "":
12
- # openai.api_key = api_key
13
- # else:
14
- # print("Warning: OPENAI_API_KEY is not set")
15
- #
16
- # api_base = os.getenv("OPENAI_API_BASE", "https://api.aiguoguo199.com/v1")
17
- # if api_base != "":
18
- # print("Warning: OPENAI_API_BASE is set to {}".format(api_base))
19
- # openai.api_base = api_base
20
- #
21
- #
22
- # # @backoff.on_exception(backoff.expo)
23
- # # def completions_with_backoff(**kwargs):
24
- # # return openai.ChatCompletion.create(**kwargs)
25
- # def chatgpt(prompt):
26
- # """
27
- #
28
- # """
29
- # response = openai.ChatCompletion.create(
30
- # model="gpt-3.5-turbo",
31
- # messages=[
32
- # # {'role': 'system', 'content': persona},
33
- # {'role': 'user', 'content': prompt}
34
- # ],
35
- # temperature=0.1,
36
- # max_tokens=1000,
37
- # n=1,
38
- # stop=None,
39
- # top_p=0.95
40
- # )
41
- # print("response: ", response)
42
- # return response
43
-
44
- # def gpt(prompt, model="gpt-3.5-turbo", temperature=0.7, max_tokens=1000, n=1, stop=None) -> list:
45
- # # 使用GPT模型生成对话,返回生成的消息列表
46
- # messages = [{"role": "user", "content": prompt}]
47
- # return chatgpt(messages, model=model, temperature=temperature, max_tokens=max_tokens, n=n, stop=stop)
48
-
49
-
50
- # def chatgpt(messages, model="gpt-3.5-turbo", temperature=0.7, max_tokens=1000, n=1, stop=None) -> list:
51
- # global completion_tokens, prompt_tokens
52
- # outputs = []
53
- # while n > 0:
54
- # # 限制每次请求的数量,以避免出现请求过多的情况
55
- # cnt = min(n, 20)
56
- # n -= cnt
57
- # # 使用backoff算法重试生成对话请求,返回生成的消息列表
58
- # res = completions_with_backoff(model=model, messages=messages, temperature=temperature, max_tokens=max_tokens,
59
- # n=cnt, stop=stop)
60
- # '''
61
- # res是一个字典,包含从GPT模型返回的响应信息。res["choices"]是一个列表,其中包含GPT模型生成的若干个响应。每个响应是一个字典,包含以下键:
62
- # "text":一个字符串,表示GPT模型生成的响应文本。
63
- # "finish_reason":一个字符串,表示GPT模型停止生成响应的原因。
64
- # "index":一个整数,表示GPT模型生成响应的索引。
65
- # "logprobs":一个字典,表示GPT模型生成响应时的对数概率。
66
- # "tokens":一个列表,表示GPT模型生成响应时使用的令牌(标记)序列。
67
- # "message":一个字典,包含以下键:
68
- # "content":一个字符串,表示GPT模型生成的响应文本。
69
- # "metadata":一个字典,包含与响应相关的元数据。
70
- # '''
71
- # outputs.extend([choice["message"]["content"] for choice in res["choices"]])
72
- # # log completion tokens
73
- # # 记录使用的生成响应和接受提示使用的token数。
74
- # completion_tokens += res["usage"]["completion_tokens"]
75
- # prompt_tokens += res["usage"]["prompt_tokens"]
76
- # return outputs
77
-
78
- # ####################### Xiaomi API接口 ##########################
79
- # def chatgpt(prompt, model="gpt-4-turbo-2024-04-09", temperature=0.1, max_tokens=1000, n=1, stop=None) -> list:
80
- # headers = {'Content-Type': 'application/json'}
81
- # url = 'http://10.221.105.108:19005' # gpt-4/3.5 for ChatCompletion
82
- # outputs = []
83
- # cnt = min(n, 20)
84
- # while n > 0:
85
- # if model == "gpt-4-turbo-2024-04-09":
86
- # data = {"uid": "xx9ty", "prompt": prompt, "history": [], "model": "gpt-4-0314",
87
- # "max_tokens": max_tokens, "n": cnt, "temperature": temperature, "stop": stop} # if n>1, response of data is a List.
88
- # elif model == "gpt-3.5-turbo":
89
- # data = {"uid": "xx9ty", "prompt": prompt, "history": [],
90
- # "max_tokens": max_tokens, "n": cnt, "temperature": temperature, "stop": stop}
91
- # response = requests.post(url, json=data, headers=headers)
92
- # try:
93
- # res = response.json()
94
- # if isinstance(res['response'], list):
95
- # assert cnt > 1
96
- # outputs.extend(res['response'])
97
- # elif isinstance(res['response'], str):
98
- # assert cnt == 1
99
- # outputs.append(res['response'])
100
- # else:
101
- # print("None of outputs!")
102
- # n -= cnt
103
- # cnt = min(n, 20)
104
- # except Exception as e:
105
- # # print(e)
106
- # pass
107
- # # print(len(outputs))
108
- # # for i, output in enumerate(outputs):
109
- # # print(i, output)
110
- # # print("data", data)
111
- # return outputs
112
-
113
def get_model_response(self, prompt, images=None):
    """Send *prompt* to the internal LLM gateway and return the response text.

    Args:
        prompt: The user prompt text to send.
        images: Optional list of images. Currently NOT included in the
            request payload — accepted only for interface compatibility.
            TODO confirm whether image support is intended here.

    Returns:
        The model's response string on success, or the literal string
        "API ERROR" after five failed attempts.

    Raises:
        ValueError: If ``self.model`` is not a supported model name.
    """
    if images is None:
        images = []
    url = f"http://preview-general-llm.api.ai.srv/api/{self.model}/{self.user_name}"
    if self.model == 'gpt-4-turbo-2024-04-09':
        content = [
            {
                "type": 'text',
                "text": prompt
            }
        ]
        messages = [
            {
                "role": 'user',
                "content": content
            }
        ]
    else:
        print(f"Unsupported Model: {self.model}")
        # Fix: was `assert False`, which is stripped under `python -O` and
        # would let execution continue with `messages` unbound.
        raise ValueError(f"Unsupported Model: {self.model}")
    try_cnt = 0
    while try_cnt < 5:
        try_cnt += 1
        try:
            # Fix: the POST now sits inside the try so transient network
            # errors are retried like bad responses, instead of escaping
            # the 5-attempt retry loop.
            resp = requests.post(
                url=url,
                json={
                    "messages": messages,
                    "max_tokens": 1000,
                    "temperature": 0.1,
                    "seed": 1234,
                    # "model": self.model,
                    # "user_name": self.user_name
                })
            res = resp.json()
            if 'Error code' in res['response']:
                print("[API ERROR]!!! API Request Error!!!")
                # print_with_color(json.dumps(res), "red")
                time.sleep(20)
                continue
            return res['response']
        except Exception as e:
            # Best-effort retry: the gateway occasionally returns non-JSON
            # bodies; back off and try again rather than crashing the agent.
            print("[API ERROR]!!! Json Decode Error!!!")
            # print_with_color(resp.text, "red")
            time.sleep(20)
            continue
    return "API ERROR"
160
-
161
def gpt_usage(backend="gpt-4"):
    """Report accumulated token usage and its estimated dollar cost.

    Reads the module-level counters ``completion_tokens`` and
    ``prompt_tokens`` accumulated across API calls.

    Args:
        backend: Model name used to select pricing — "gpt-4" or
            "gpt-3.5-turbo".

    Returns:
        dict with keys "completion_tokens", "prompt_tokens" and "cost" (USD).

    Raises:
        ValueError: For an unrecognized *backend*.
    """
    global completion_tokens, prompt_tokens
    if backend == "gpt-4":
        # gpt-4 prices completion and prompt tokens at different rates.
        cost = completion_tokens / 1000 * 0.06 + prompt_tokens / 1000 * 0.03
    elif backend == "gpt-3.5-turbo":
        cost = (completion_tokens + prompt_tokens) / 1000 * 0.0002
    else:
        # Fix: previously an unknown backend fell through with `cost`
        # unbound, crashing with UnboundLocalError at the return below.
        raise ValueError(f"Unknown backend: {backend}")
    return {"completion_tokens": completion_tokens, "prompt_tokens": prompt_tokens, "cost": cost}