hadadrjt commited on
Commit
5456854
·
0 Parent(s):

ai: Initial.

Browse files
Files changed (4) hide show
  1. .gitattributes +35 -0
  2. README.md +10 -0
  3. app.py +124 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Jarvis AI
3
+ colorFrom: yellow
4
+ colorTo: purple
5
+ sdk: gradio
6
+ sdk_version: 5.22.0
7
+ app_file: app.py
8
+ pinned: false
9
+ short_description: Inspired by Iron Man movies.
10
+ ---
app.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ import json
4
+ import os
5
+ from dotenv import load_dotenv
6
+ import threading
7
+ import random
8
+ import time
# Load variables from a local .env file (if present) into os.environ before any
# configuration is read. BUGFIX: load_dotenv was imported but never called, so
# .env values were silently ignored unless the environment was pre-populated.
load_dotenv()

# Inference endpoint and the pool of provider API keys (a JSON list in the env;
# blank entries are filtered out).
LINUX_SERVER_HOST = os.getenv("LINUX_SERVER_HOST")
LINUX_SERVER_PROVIDER_KEY = [key for key in json.loads(os.getenv("LINUX_SERVER_PROVIDER_KEY", "[]")) if key]

# Env-driven UI labels/branding strings and canned response texts.
# NOTE(review): missing env vars yield None values here — downstream code
# assumes these are set; confirm the Space defines them all.
AI_TYPES = {f"AI_TYPE_{i}": os.getenv(f"AI_TYPE_{i}") for i in range(1, 6)}
RESPONSES = {f"RESPONSE_{i}": os.getenv(f"RESPONSE_{i}") for i in range(1, 10)}

# Model catalogue: internal key -> display name, optional per-model request
# config, and a default config for models without a specific entry.
MODEL_MAPPING = json.loads(os.getenv("MODEL_MAPPING", "{}"))
MODEL_CONFIG = json.loads(os.getenv("MODEL_CONFIG", "{}"))
MODEL_CHOICES = list(MODEL_MAPPING.values())
DEFAULT_CONFIG = json.loads(os.getenv("DEFAULT_CONFIG", "{}"))

# Extra <head> markup injected into the Gradio page.
META_TAGS = os.getenv("META_TAGS")

# Cancellation flag and HTTP session.
# NOTE(review): both are module-level and therefore shared across all
# concurrent users of the Space — one user's Stop cancels everyone.
stop_event = threading.Event()
session = requests.Session()
def get_model_key(display_name):
    """Map a model's display name back to its internal MODEL_MAPPING key.

    BUGFIX: the original fell back to MODEL_CHOICES[0], which is a *display
    name* rather than a key, and evaluated that default eagerly (IndexError
    whenever the mapping is empty, even on a successful lookup). Fall back to
    the first mapping key instead, or to the given display name when the
    mapping is empty.
    """
    for key, display in MODEL_MAPPING.items():
        if display == display_name:
            return key
    return next(iter(MODEL_MAPPING), display_name)
def simulate_streaming_response(text):
    """Yield *text* one line at a time with a short pause, emulating streaming.

    Terminates immediately once the global stop_event is set.
    """
    for current_line in text.splitlines():
        if stop_event.is_set():
            break
        yield current_line + "\n"
        time.sleep(0.01)
def chat_with_model(history, user_input, selected_model_display):
    """Request an AI reply for *user_input* and yield it as streamed chunks.

    history: list of [user, assistant] pairs. The caller may have already
    appended a placeholder pair for the current turn; it is trimmed here so
    the current message is not sent twice.
    Tries up to two provider keys; yields RESPONSES["RESPONSE_3"] when no
    provider responds and RESPONSES["RESPONSE_1"] on cancellation.
    """
    if stop_event.is_set():
        yield RESPONSES["RESPONSE_1"]
        return

    if not LINUX_SERVER_PROVIDER_KEY or not LINUX_SERVER_HOST:
        yield RESPONSES["RESPONSE_3"]
        return

    selected_model = get_model_key(selected_model_display)
    model_config = MODEL_CONFIG.get(selected_model, DEFAULT_CONFIG)

    # Drop the trailing placeholder pair appended by respond() for this turn,
    # identified by matching user text plus the placeholder assistant text.
    pairs = history
    if pairs and pairs[-1][0] == user_input and pairs[-1][1] == RESPONSES["RESPONSE_8"]:
        pairs = pairs[:-1]

    # BUGFIX: build the message list in chronological order (user/assistant
    # interleaved). The original sent every user message first and then every
    # assistant message, destroying the conversation order for the model.
    messages = []
    for user_msg, assistant_msg in pairs:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": user_input})

    data = {"model": selected_model, "messages": messages, **model_config}

    # Pick up to two keys at random. BUGFIX: the original random.shuffle
    # mutated the shared module-level key list in place.
    num_keys = min(2, len(LINUX_SERVER_PROVIDER_KEY))
    for api_key in random.sample(LINUX_SERVER_PROVIDER_KEY, num_keys):
        if stop_event.is_set():
            yield RESPONSES["RESPONSE_1"]
            return
        try:
            # Timeout added so a hung provider cannot block this worker
            # forever; a timeout raises RequestException and we fall through
            # to the next key.
            response = session.post(
                LINUX_SERVER_HOST,
                json=data,
                headers={"Authorization": f"Bearer {api_key}"},
                timeout=60,
            )
            if stop_event.is_set():
                yield RESPONSES["RESPONSE_1"]
                return
            if response.status_code < 400:
                ai_text = response.json().get("choices", [{}])[0].get("message", {}).get("content", RESPONSES["RESPONSE_2"])
                yield from simulate_streaming_response(ai_text)
                return
        except requests.exceptions.RequestException:
            # Network error / timeout: try the next provider key.
            continue

    yield RESPONSES["RESPONSE_3"]
def respond(user_input, history, selected_model_display):
    """Gradio event handler: append the user's turn and stream the AI reply.

    Yields (history, textbox update, send-button update, stop-button update)
    tuples so the UI can clear the textbox and swap Send/Stop while streaming.
    """
    if not user_input.strip():
        # BUGFIX: on empty input, keep Send visible (but disabled) and keep
        # Stop hidden. The original swapped to the Stop button even though no
        # request was started.
        yield history, gr.update(value=""), gr.update(visible=True, interactive=False), gr.update(visible=False)
        return

    stop_event.clear()
    # Placeholder assistant text shown while the provider is working.
    history.append([user_input, RESPONSES["RESPONSE_8"]])

    yield history, gr.update(value=""), gr.update(visible=False), gr.update(visible=True)

    ai_response = ""
    # BUGFIX: pass the history *without* the placeholder pair appended above;
    # otherwise the current message is duplicated in the provider request.
    for chunk in chat_with_model(history[:-1], user_input, selected_model_display):
        if stop_event.is_set():
            # NOTE(review): closes the shared module-level session; requests
            # rebuilds connection pools on next use, but confirm this is the
            # intended cancellation cleanup.
            session.close()
            history[-1][1] = RESPONSES["RESPONSE_1"]
            yield history, gr.update(value=""), gr.update(visible=True), gr.update(visible=False)
            return
        ai_response += chunk
        history[-1][1] = ai_response
        yield history, gr.update(value=""), gr.update(visible=False), gr.update(visible=True)

    session.close()
    # Restore Send, hide Stop once streaming finishes.
    yield history, gr.update(value=""), gr.update(visible=True), gr.update(visible=False)
def stop_response():
    """Signal cancellation of the in-flight response and restore the buttons.

    BUGFIX: the stop button's click handler declares [send_btn, stop_btn] as
    outputs, but the original returned nothing, so Gradio received no values
    to restore button visibility with. Return the two updates explicitly.
    """
    stop_event.set()
    # NOTE(review): closes the shared session for all users — confirm intended.
    session.close()
    return gr.update(visible=True), gr.update(visible=False)
def change_model(new_model_display):
    """Reset the stored chat history when the user selects a different model."""
    cleared_history = []
    return cleared_history, new_model_display
def check_send_button_enabled(msg):
    """Show and enable the Send button only when the textbox has non-blank text."""
    has_text = bool(msg.strip())
    return gr.update(visible=has_text, interactive=has_text)
# --- Gradio UI wiring (top-level script code) ---
# All labels, placeholders, and button texts come from the env-driven
# AI_TYPES / RESPONSES dicts defined above.
with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], head=META_TAGS) as demo:
    # Per-session state: chat history (list of [user, assistant] pairs) and
    # the display name of the currently selected model.
    user_history = gr.State([])
    selected_model = gr.State(MODEL_CHOICES[0])

    # NOTE(review): MODEL_CHOICES[0] raises IndexError when MODEL_MAPPING is
    # empty/unset — the app requires that env var to be configured.
    chatbot = gr.Chatbot(label=AI_TYPES["AI_TYPE_1"], height=600, show_copy_button=True, show_share_button=False, elem_id=AI_TYPES["AI_TYPE_2"])
    model_dropdown = gr.Dropdown(label=AI_TYPES["AI_TYPE_3"], show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0], interactive=True)
    msg = gr.Textbox(label=RESPONSES["RESPONSE_4"], show_label=False, placeholder=RESPONSES["RESPONSE_5"])

    # Send starts disabled until the textbox has text; Stop stays hidden
    # until a response is streaming.
    with gr.Row():
        send_btn = gr.Button(RESPONSES["RESPONSE_6"], visible=True, interactive=False)
        stop_btn = gr.Button(RESPONSES["RESPONSE_7"], variant=RESPONSES["RESPONSE_9"], visible=False)

    # Changing the model clears the history State.
    # NOTE(review): the visible chatbot component is NOT cleared here (it is
    # not in the outputs) — confirm whether the display should reset too.
    model_dropdown.change(fn=change_model, inputs=[model_dropdown], outputs=[user_history, selected_model])
    # respond is a generator, so the chatbot/buttons update as chunks arrive.
    send_btn.click(respond, inputs=[msg, user_history, selected_model], outputs=[chatbot, msg, send_btn, stop_btn])
    # Re-evaluate Send button visibility/interactivity on every keystroke.
    msg.change(fn=check_send_button_enabled, inputs=[msg], outputs=[send_btn])
    # Stop: signals cancellation; outputs let the handler restore the buttons.
    stop_btn.click(fn=stop_response, outputs=[send_btn, stop_btn])

# NOTE(review): share=True has no effect when running on Hugging Face Spaces
# (the Space itself is the public URL) — confirm it is needed for local runs.
demo.launch(share=True, show_api=False, favicon_path=AI_TYPES["AI_TYPE_5"])
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ huggingface_hub
2
+ optillm
3
+ python-dotenv