Dmitry Trifonov committed
Commit · 1e7a4d3
1 Parent(s): bf413b8

update to work with new fair client
Files changed:
- text_to_image.py (+10 -8)
- unsafe.png (deleted)

text_to_image.py CHANGED
@@ -13,8 +13,8 @@ from fair import FairClient
 logger = logging.getLogger()
 
 SERVER_ADDRESS = "https://faircompute.com:8000"
-INFERENCE_NODE = "
-TUNNEL_NODE = "
+INFERENCE_NODE = "magnus"
+TUNNEL_NODE = "gcs-e2-micro"
 # SERVER_ADDRESS = "http://localhost:8000"
 # INFERENCE_NODE = "ef09913249aa40ecba7d0097f7622855"
 # TUNNEL_NODE = "c312e6c4788b00c73c287ab0445d3655"
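The change replaces hard-coded node IDs with human-readable node names. A minimal sketch (not part of the commit) of a sanity check built on the same fc.cluster().nodes.list() call the new code uses; check_nodes and the error message are illustrative:

# Sketch: fail fast if the configured node names are not registered in the
# cluster. fc.cluster().nodes.list() and the 'name' key are taken from the
# diff; everything else here is illustrative.
def check_nodes(fc):
    names = {info['name'] for info in fc.cluster().nodes.list()}
    for node in (INFERENCE_NODE, TUNNEL_NODE):
        if node not in names:
            raise RuntimeError(f"node {node!r} is not registered in the cluster")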
@@ -68,7 +68,8 @@ def create_fair_client():
 
 
 def create_endpoint_client(fc, retries, timeout=1.0, delay=2.0):
-
+    nodes = fc.cluster().nodes.list()
+    server_address = next(info['host_address'] for info in nodes if info['name'] == TUNNEL_NODE)
     for i in range(retries):
         try:
             return EndpointClient(server_address, timeout=timeout)
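Pieced together from this hunk, the patched helper plausibly reads as the sketch below. EndpointClient, the delay parameter, and ServerNotReadyException all appear elsewhere in this diff; sleeping delay seconds between attempts and raising on exhaustion are assumptions, since the loop body beyond the try is not visible.

import time
import requests

# Sketch of the patched helper under the assumptions above.
def create_endpoint_client(fc, retries, timeout=1.0, delay=2.0):
    # resolve the tunnel node's public address via the new fair client API
    nodes = fc.cluster().nodes.list()
    server_address = next(info['host_address'] for info in nodes if info['name'] == TUNNEL_NODE)
    for i in range(retries):
        try:
            return EndpointClient(server_address, timeout=timeout)
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            if i + 1 < retries:
                time.sleep(delay)  # assumed use of the otherwise-unused delay parameter
    raise ServerNotReadyException(f"no response from {server_address} after {retries} attempts")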
@@ -94,14 +95,15 @@ bind_addr = "0.0.0.0:5000" # port that exposes service to the Internet
 """
     with open('server.toml', 'w') as file:
         file.write(server_config)
-    fc.run(
+    fc.run(node_name=TUNNEL_NODE,
            image=TUNNEL_DOCKER_IMAGE,
            command=["--server", "/app/config.toml"],
            volumes=[("./server.toml", "/app/config.toml")],
            network="host",
            detach=True)
 
-
+    nodes = fc.cluster().nodes.list()
+    server_address = next(info['host_address'] for info in nodes if info['name'] == TUNNEL_NODE)
     client_config = f"""
 [client]
 remote_addr = "{server_address}:2333" # address of the rathole server
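Only fragments of the two rathole configs are visible in the hunk headers (bind_addr, remote_addr, local_addr). A hedged reconstruction of how they might look in full: the control-channel bind_addr on port 2333 is inferred from remote_addr, and the service name text_to_image is a placeholder, not shown in the diff.

# Reconstruction of the configs written by start_tunnel(), assumptions noted above.
server_config = """
[server]
bind_addr = "0.0.0.0:2333"       # control channel, matches remote_addr below

[server.services.text_to_image]
bind_addr = "0.0.0.0:5000"       # port that exposes service to the Internet
"""

client_config = f"""
[client]
remote_addr = "{server_address}:2333"  # address of the rathole server

[client.services.text_to_image]
local_addr = "127.0.0.1:5001"          # address of the service that needs to be exposed
"""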
@@ -112,7 +114,7 @@ local_addr = "127.0.0.1:5001" # address of the service that needs
 """
     with open('client.toml', 'w') as file:
         file.write(client_config)
-    fc.run(
+    fc.run(node_name=INFERENCE_NODE,
            image=TUNNEL_DOCKER_IMAGE,
            command=["--client", "/app/config.toml"],
            volumes=[("./client.toml", "/app/config.toml")],
@@ -121,7 +123,7 @@ local_addr = "127.0.0.1:5001" # address of the service that needs
 
 
 def start_inference_server(fc: FairClient):
-    fc.run(
+    fc.run(node_name=INFERENCE_NODE,
            image=INFERENCE_DOCKER_IMAGE,
            runtime="nvidia",
            ports=[(5001, 8000)],
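For readability, the patched start_inference_server in full, as a sketch: the four arguments shown come from the hunk, while detach=True is an assumption by analogy with the tunnel containers.

# Sketch of the patched function. ports maps host port 5001 to container
# port 8000; runtime="nvidia" gives the container GPU access. detach=True
# is assumed, not visible in the hunk.
def start_inference_server(fc: FairClient):
    fc.run(node_name=INFERENCE_NODE,
           image=INFERENCE_DOCKER_IMAGE,
           runtime="nvidia",
           ports=[(5001, 8000)],
           detach=True)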
@@ -141,7 +143,7 @@ def text_to_image(text):
         # client is not configured, try connecting to the inference server, maybe it is running
         else:
             endpoint_client = create_endpoint_client(fair_client, 1)
-    except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout):
+    except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout, ServerNotReadyException):
         # inference server is not ready, start inference server and open the tunnel
         start_inference_server(fair_client)
         start_tunnel(fair_client)
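The widened except clause now also catches ServerNotReadyException from create_endpoint_client. Most of the surrounding control flow sits outside the hunk; below is a hypothetical reconstruction consistent with the visible lines, where the endpoint_client.text_to_image call and the retry count of 10 are invented for illustration.

# Hypothetical flow around the hunk above: try to reach a running inference
# server first; only on failure start the server and the tunnel, then retry
# with more patience.
def text_to_image(text):
    fair_client = create_fair_client()
    try:
        # client is not configured, try connecting to the inference server,
        # maybe it is running
        endpoint_client = create_endpoint_client(fair_client, 1)
    except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError,
            requests.exceptions.Timeout, ServerNotReadyException):
        # inference server is not ready, start inference server and open the tunnel
        start_inference_server(fair_client)
        start_tunnel(fair_client)
        endpoint_client = create_endpoint_client(fair_client, retries=10)  # illustrative retry count
    return endpoint_client.text_to_image(text)  # illustrative call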
unsafe.png DELETED
Binary file (29.6 kB)