Niansuh committed on
Commit
19d4631
·
verified ·
1 Parent(s): 88bc5e3

Update api/provider/gizai.py

Browse files
Files changed (1) hide show
  1. api/provider/gizai.py +164 -135
api/provider/gizai.py CHANGED
@@ -1,154 +1,183 @@
1
- import json
2
  import uuid
3
- from typing import List, Dict, Any
4
  from datetime import datetime
 
 
5
 
6
- from aiohttp import ClientSession
 
 
7
  from api.logger import setup_logger
8
 
9
  logger = setup_logger(__name__)
10
 
11
- class GizAIProvider:
12
- # Class variables
13
- url = "https://app.giz.ai/assistant/"
14
- api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
15
- working = True
16
 
17
- supports_system_message = True
18
- supports_message_history = True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
- # Chat models
21
- default_model = 'chat-gemini-flash'
22
- chat_models = [
23
- default_model,
24
- 'chat-gemini-pro',
25
- 'chat-gpt4m',
26
- 'chat-gpt4',
27
- 'claude-sonnet',
28
- 'claude-haiku',
29
- 'llama-3-70b',
30
- 'llama-3-8b',
31
- 'mistral-large',
32
- 'chat-o1-mini'
33
- ]
34
 
35
- # Image models
36
- image_models = [
37
- 'flux1',
38
- 'sdxl',
39
- 'sd',
40
- 'sd35',
41
- ]
42
 
43
- models = [*chat_models, *image_models]
44
 
45
- model_aliases = {
46
- # Chat model aliases
47
- "gemini-flash": "chat-gemini-flash",
48
- "gemini-pro": "chat-gemini-pro",
49
- "gpt-4o-mini": "chat-gpt4m",
50
- "gpt-4o": "chat-gpt4",
51
- "claude-3.5-sonnet": "claude-sonnet",
52
- "claude-3-haiku": "claude-haiku",
53
- "llama-3.1-70b": "llama-3-70b",
54
- "llama-3.1-8b": "llama-3-8b",
55
- "o1-mini": "chat-o1-mini",
56
- # Image model aliases
57
- "sd-1.5": "sd",
58
- "sd-3.5": "sd35",
59
- "flux-schnell": "flux1",
60
- }
61
 
62
- @classmethod
63
- def get_model(cls, model: str) -> str:
64
- if model in cls.models:
65
- return model
66
- elif model in cls.model_aliases:
67
- return cls.model_aliases[model]
68
- else:
69
- return cls.default_model
70
 
71
- @classmethod
72
- def is_image_model(cls, model: str) -> bool:
73
- return model in cls.image_models
74
 
75
- @classmethod
76
- async def create_async_generator(
77
- cls,
78
- model: str,
79
- messages: List[Dict[str, Any]],
80
- max_tokens: int,
81
- top_p: float,
82
- temperature: float,
83
- stream: bool = True,
84
- **kwargs
85
- ):
86
- model = cls.get_model(model)
87
 
88
- headers = {
89
- 'Accept': 'application/json, text/plain, */*',
90
- 'Accept-Language': 'en-US,en;q=0.9',
91
- 'Cache-Control': 'no-cache',
92
- 'Connection': 'keep-alive',
93
- 'Content-Type': 'application/json',
94
- 'Origin': 'https://app.giz.ai',
95
- 'Pragma': 'no-cache',
96
- 'Sec-Fetch-Dest': 'empty',
97
- 'Sec-Fetch-Mode': 'cors',
98
- 'Sec-Fetch-Site': 'same-origin',
99
- 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
100
- '(KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
101
- 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
102
- 'sec-ch-ua-mobile': '?0',
103
- 'sec-ch-ua-platform': '"Linux"'
104
- }
105
 
106
- async with ClientSession() as session:
107
- if cls.is_image_model(model):
108
- # Image generation
109
- prompt = messages[-1]["content"]
110
- data = {
111
- "model": model,
112
- "input": {
113
- "width": "1024",
114
- "height": "1024",
115
- "steps": 4,
116
- "output_format": "webp",
117
- "batch_size": 1,
118
- "mode": "plan",
119
- "prompt": prompt
120
- }
121
  }
122
- async with session.post(
123
- cls.api_endpoint,
124
- headers=headers,
125
- data=json.dumps(data),
126
- ) as response:
127
- response.raise_for_status()
128
- response_data = await response.json()
129
- if response_data.get('status') == 'completed' and response_data.get('output'):
130
- for url in response_data['output']:
131
- yield {"images": url, "alt": "Generated Image"}
132
- else:
133
- # Chat completion
134
- data = {
135
- "model": model,
136
- "input": {
137
- "messages": [
138
- {
139
- "type": "human",
140
- "content": " ".join([msg['content'] for msg in messages])
141
- }
142
- ],
143
- "mode": "plan"
144
- },
145
- "noStream": True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
  }
147
- async with session.post(
148
- cls.api_endpoint,
149
- headers=headers,
150
- data=json.dumps(data),
151
- ) as response:
152
- response.raise_for_status()
153
- result = await response.json()
154
- yield result.get('output', '')
 
 
1
  import uuid
 
2
  from datetime import datetime
3
+ import json
4
+ from typing import Any, Dict, Optional
5
 
6
+ import httpx
7
+ from fastapi import HTTPException
8
+ from api.models import ChatRequest
9
  from api.logger import setup_logger
10
 
11
  logger = setup_logger(__name__)
12
 
13
+ # Base URL for giz.ai
14
+ GIZAI_BASE_URL = "https://app.giz.ai"
15
+ GIZAI_API_ENDPOINT = f"{GIZAI_BASE_URL}/api/data/users/inferenceServer.infer"
 
 
16
 
17
+ # Headers for giz.ai
18
+ GIZAI_HEADERS = {
19
+ 'Accept': 'application/json, text/plain, */*',
20
+ 'Accept-Language': 'en-US,en;q=0.9',
21
+ 'Cache-Control': 'no-cache',
22
+ 'Connection': 'keep-alive',
23
+ 'Content-Type': 'application/json',
24
+ 'Origin': 'https://app.giz.ai',
25
+ 'Pragma': 'no-cache',
26
+ 'Sec-Fetch-Dest': 'empty',
27
+ 'Sec-Fetch-Mode': 'cors',
28
+ 'Sec-Fetch-Site': 'same-origin',
29
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
30
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
31
+ 'sec-ch-ua-mobile': '?0',
32
+ 'sec-ch-ua-platform': '"Linux"'
33
+ }
34
 
35
+ # List of models supported by giz.ai
36
+ GIZAI_CHAT_MODELS = [
37
+ 'chat-gemini-flash',
38
+ 'chat-gemini-pro',
39
+ 'chat-gpt4m',
40
+ 'chat-gpt4',
41
+ 'claude-sonnet',
42
+ 'claude-haiku',
43
+ 'llama-3-70b',
44
+ 'llama-3-8b',
45
+ 'mistral-large',
46
+ 'chat-o1-mini'
47
+ ]
 
48
 
49
+ GIZAI_IMAGE_MODELS = [
50
+ 'flux1',
51
+ 'sdxl',
52
+ 'sd',
53
+ 'sd35',
54
+ ]
 
55
 
56
+ GIZAI_MODELS = GIZAI_CHAT_MODELS + GIZAI_IMAGE_MODELS
57
 
58
+ GIZAI_MODEL_ALIASES = {
59
+ # Chat model aliases
60
+ "gemini-flash": "chat-gemini-flash",
61
+ "gemini-pro": "chat-gemini-pro",
62
+ "gpt-4o-mini": "chat-gpt4m",
63
+ "gpt-4o": "chat-gpt4",
64
+ "claude-3.5-sonnet": "claude-sonnet",
65
+ "claude-3-haiku": "claude-haiku",
66
+ "llama-3.1-70b": "llama-3-70b",
67
+ "llama-3.1-8b": "llama-3-8b",
68
+ "o1-mini": "chat-o1-mini",
69
+ # Image model aliases
70
+ "sd-1.5": "sd",
71
+ "sd-3.5": "sd35",
72
+ "flux-schnell": "flux1",
73
+ }
74
 
75
+ def get_gizai_model(model: str) -> str:
76
+ if model in GIZAI_MODELS:
77
+ return model
78
+ elif model in GIZAI_MODEL_ALIASES:
79
+ return GIZAI_MODEL_ALIASES[model]
80
+ else:
81
+ # Default model
82
+ return 'chat-gemini-flash'
83
 
84
+ def is_image_model(model: str) -> bool:
85
+ return model in GIZAI_IMAGE_MODELS
 
86
 
87
+ async def process_streaming_response(request: ChatRequest):
88
+ # giz.ai does not support streaming
89
+ # So we can raise an error or process as non-streaming
90
+ return await process_non_streaming_response(request)
 
 
 
 
 
 
 
 
91
 
92
async def _gizai_post(client: httpx.AsyncClient, data: Dict[str, Any]) -> Dict[str, Any]:
    """POST *data* to the giz.ai inference endpoint and return the decoded JSON.

    Centralizes the transport error policy shared by the chat and image paths:
    upstream HTTP errors become HTTPExceptions with the upstream status code,
    and connection-level failures become 500s.
    """
    try:
        response = await client.post(
            GIZAI_API_ENDPOINT,
            headers=GIZAI_HEADERS,
            json=data,
            timeout=100,
        )
        response.raise_for_status()
        return response.json()
    except httpx.HTTPStatusError as e:
        logger.error(f"HTTP error occurred: {e}")
        raise HTTPException(status_code=e.response.status_code, detail=str(e))
    except httpx.RequestError as e:
        logger.error(f"Error occurred during request: {e}")
        raise HTTPException(status_code=500, detail=str(e))


async def process_non_streaming_response(request: ChatRequest):
    """Process a chat or image request against giz.ai and return a response dict.

    Dispatches on the resolved model: image models produce an OpenAI-style
    image-generation response ("data" holds the raw upstream output, presumably
    image URLs — TODO confirm against the upstream API); chat models produce a
    chat.completion response with a single assistant choice.

    Raises:
        HTTPException: on upstream HTTP errors (upstream status code),
            request failures (500), or an incomplete image job (500).
    """
    model = get_gizai_model(request.model)

    async with httpx.AsyncClient() as client:
        if is_image_model(model):
            # Image generation: only the last message is used as the prompt.
            prompt = request.messages[-1].content
            data = {
                "model": model,
                "input": {
                    "width": "1024",
                    "height": "1024",
                    "steps": 4,
                    "output_format": "webp",
                    "batch_size": 1,
                    "mode": "plan",
                    "prompt": prompt,
                },
            }
            response_data = await _gizai_post(client, data)
            if response_data.get('status') == 'completed' and response_data.get('output'):
                images = response_data['output']
                # Return image response (e.g., URLs)
                return {
                    "id": f"imggen-{uuid.uuid4()}",
                    "object": "image_generation",
                    "created": int(datetime.now().timestamp()),
                    "model": request.model,
                    "data": images,
                }
            raise HTTPException(status_code=500, detail="Image generation failed")
        else:
            # Chat completion: giz.ai takes a single message, so the whole
            # history is flattened into one "role: content" transcript.
            messages_content = "\n".join(
                f"{msg.role}: {msg.content}" for msg in request.messages
            )
            data = {
                "model": model,
                "input": {
                    "messages": [
                        {
                            "type": "human",
                            "content": messages_content,
                        }
                    ],
                    "mode": "plan",
                },
                "noStream": True,
            }
            response_data = await _gizai_post(client, data)
            output = response_data.get('output', '')
            return {
                "id": f"chatcmpl-{uuid.uuid4()}",
                "object": "chat.completion",
                "created": int(datetime.now().timestamp()),
                "model": request.model,
                "choices": [
                    {
                        "index": 0,
                        "message": {"role": "assistant", "content": output},
                        "finish_reason": "stop",
                    }
                ],
                "usage": None,
            }