zenafey ddosxd committed on
Commit f316a62 · 0 Parent(s)

Duplicate from ddosxd/sydney-gpt4-node-1


Co-authored-by: Vova <[email protected]>

Files changed (5)
  1. .gitattributes +35 -0
  2. Dockerfile +15 -0
  3. README.md +11 -0
  4. main.py +85 -0
  5. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,15 @@
+ # Use the official Python image
+ FROM python:3.8-slim
+
+ # Set the working directory
+ WORKDIR /app
+
+ # Copy the requirements file and install dependencies
+ COPY requirements.txt ./
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the project files into the working directory
+ COPY . .
+
+ # Start the bot
+ CMD ["python", "main.py"]
README.md ADDED
@@ -0,0 +1,11 @@
+ ---
+ title: Sydney Gpt4 Node 1
+ emoji: ⚡
+ colorFrom: red
+ colorTo: green
+ sdk: docker
+ pinned: false
+ duplicated_from: ddosxd/sydney-gpt4-node-1
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
main.py ADDED
@@ -0,0 +1,85 @@
+ import openai as closeai
+ from flask import Flask, request, Response
+ import os
+ from rich import print
+ import json
+ from time import sleep
+
+ # Node, upstream API, and web server settings, all read from environment variables
+ settings = {
+     'node': {
+         'id': os.environ.get('nodeId'),
+         'models': os.environ.get('nodeModel')
+     },
+     'api': {
+         'host': os.environ.get('apibase'),
+         'key': os.environ.get('apikey')
+     },
+     'security': {
+         'passw': os.environ.get('apipassw')
+     },
+     'web': {
+         'port': os.environ.get('webport', 7860),
+         'host': os.environ.get('webhost', '0.0.0.0'),
+         'debug': os.environ.get('webdebug', False)
+     }
+ }
+
+ app = Flask(__name__)
+
+ # Point the OpenAI client at the configured upstream API
+ closeai.api_base = settings["api"]["host"]
+ closeai.api_key = settings["api"]["key"]
+
+ @app.route("/")
+ def index():
+     return f"Hi, it's node {settings['node']['id']} with {settings['node']['models']}"
+
+ @app.route("/chat/completions", methods=['POST'])
+ def chat_completions():
+     streaming = request.json.get('stream', False)
+     model = request.json.get('model', 'gpt-4')
+     messages = request.json.get('messages')
+     response = ''
+
+     try:
+         response = closeai.ChatCompletion.create(model=model, stream=streaming, messages=messages)
+     except Exception as er:
+         print(er)
+         # Back off briefly on rate limits, then report the error as a server-sent event
+         if '429' in str(er):
+             sleep(30)
+         def errorStream(er):
+             yield 'data: %s\n\n' % json.dumps({"status": "!=200", "error": str(er)}, separators=(',', ':'))
+         return app.response_class(errorStream(er), mimetype='text/event-stream')
+
+     if not streaming:
+         return {
+             'model': model,
+             'result': response["choices"][0]["message"]["content"]
+         }
+
+     def stream():
+         # Relay each streamed chunk to the client as a server-sent event
+         for token in response:
+             completion_data = {
+                 'model': model,
+                 'token': token,
+                 'status': 200,
+                 'choices': [
+                     {
+                         'delta': {
+                             'content': token
+                         },
+                         'index': 0,
+                         'finish_reason': None
+                     }
+                 ]
+             }
+
+             yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
+
+     return app.response_class(stream(), mimetype='text/event-stream')
+
+ if __name__ == '__main__':
+     app.run(**settings['web'])
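For reference, a client talks to this node by POSTing to /chat/completions: with "stream": false it gets back a plain JSON object with a result field, and with "stream": true it receives "data: {...}" server-sent events in the format produced by main.py above. A minimal client sketch using the requests library follows; the http://localhost:7860 base URL is an assumption matching the default webport, and requests itself is not part of this Space's requirements.txt.

    import json
    import requests  # assumed available on the client side; not in this Space's requirements.txt

    BASE = "http://localhost:7860"  # assumption: node running locally on the default webport

    # Non-streaming: the node returns {"model": ..., "result": ...}
    resp = requests.post(f"{BASE}/chat/completions", json={
        "model": "gpt-4",
        "stream": False,
        "messages": [{"role": "user", "content": "Hello!"}],
    })
    print(resp.json()["result"])

    # Streaming: each SSE line carries one JSON chunk; the node forwards the raw
    # upstream chunk under choices[0].delta.content (and again under "token")
    with requests.post(f"{BASE}/chat/completions", json={
        "model": "gpt-4",
        "stream": True,
        "messages": [{"role": "user", "content": "Hello!"}],
    }, stream=True) as sse:
        for line in sse.iter_lines(decode_unicode=True):
            if line and line.startswith("data: "):
                chunk = json.loads(line[len("data: "):])
                if chunk.get("status") == 200:
                    print(chunk["choices"][0]["delta"]["content"])
                else:
                    print("upstream error:", chunk.get("error"))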
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ openai
+ flask
+ rich