import os
import base64
import gradio as gr
from gradio_client import Client, file
import time
import json
import threading
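# Model name shown in the footer, plus the remote Gradio endpoints (read from
# environment variables): CHAT_URL serves the text-chat model, VL_URL the
# vision-language model.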
MODEL_NAME = "QWEN"
client_chat = os.environ.get("CHAT_URL")
client_vl = os.environ.get("VL_URL")
def read(filename):
    # Read a UTF-8 text file and return its contents (used for the system prompt).
    with open(filename, encoding="utf-8") as f:
        data = f.read()
    return data

SYS_PROMPT = read('system_prompt.txt')
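# Page header (Chinese): a "family doctor" demo that analyses symptom descriptions
# and uploaded test-report images, with a disclaimer that the output is not real
# medical advice and a professional doctor should be consulted if needed.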
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">家庭医生demo</h1>
<p>🩺一个帮助您分析症状和检验报告的家庭医生(AI诊疗助手)。</p>
<p>🔎 选择您需要咨询的科室医生,在输入框中输入症状描述或者体检信息等;您也可以在图片框中上传检测报告图。</p>
<p>🦕 请注意生成信息可能不准确,且不具备任何实际参考价值,如有需要请联系专业医生。</p>
</div>
'''
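# Custom CSS: center the page title and hide the default Gradio footer.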
css = """
h1 {
text-align: center;
display: block;
}
footer {
display:none !important
}
"""
LICENSE = '采用 ' + MODEL_NAME + ' 模型'
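# Shared state: `result` and `json_path` are written by the background worker
# thread and read by the UI polling callback.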
result = "等待分析..."
json_path = ""
def process_text(text_input, unit):
    # Text-only path: forward the symptom description to the remote chat model.
    client = Client(client_chat)
    print(client.view_api())  # debug: log the remote API signature
    job = client.submit(
        query=str(text_input),
        history=None,
        system=f"You are an experienced {unit} doctor AI assistant. " + SYS_PROMPT,
        api_name="/model_chat"
    )
    response = job.result()
    print(response)
    # response[1] holds the chat history; return the assistant reply of the first turn.
    result = response[1][0][1]
    return result
def process_image(image_input, unit):
    # Image path: send the uploaded report image to the remote vision-language
    # demo, then collect the analysis asynchronously in a background thread.
    global result, json_path
    if image_input is not None:
        image = str(image_input)
        print(image)
        #with open(image_input, "rb") as f:
        #    base64_image = base64.b64encode(f.read()).decode("utf-8")
        client = Client(client_vl)
        print(client.view_api())  # debug: log the remote API signature
        prompt = (f"You are an experienced {unit} doctor AI assistant. " + SYS_PROMPT
                  + " Help me understand what is in this picture and analyse it.")
        # First remote call: hand the image to the VL demo (positional endpoint fn_index=5).
        res5 = client.predict(
            "",
            image,
            fn_index=5
        )
        print(res5)
        # Second remote call: submit the prompt for that image (fn_index=0) and keep
        # the returned value so the worker thread can poll for the finished answer.
        res0 = client.predict(
            res5,
            prompt,
            fn_index=0
        )
        print(res0)
        json_path = res0

        def update():
            # Runs in a background thread: fetch the finished conversation (fn_index=1),
            # which comes back as a JSON file, and publish the last reply globally.
            global result
            job = client.submit(
                json_path,
                fn_index=1
            )
            response = job.result(timeout=60)
            with open(response, 'r', encoding="utf-8") as f:
                data = json.load(f)
            print(data)
            result = data[-1][1]

        threading.Thread(target=update).start()
    # Return a placeholder immediately; the UI polls fetch_result for the real answer.
    return "等待分析..."
def fetch_result():
    # Polled by the UI to pick up results produced by the background thread.
    return result

def main(text_input="", image_input=None, unit=""):
    # Route the request: text-only goes to the chat model, anything with an
    # image goes to the vision-language model.
    if text_input and image_input is None:
        return process_text(text_input, unit)
    elif image_input is not None:
        return process_image(image_input, unit)
    # Neither input was provided: keep showing the current status text.
    return result
with gr.Blocks(css=css, title="家庭医生AI助手") as iface:
    with gr.Accordion(""):
        gr.Markdown(DESCRIPTION)
    # Department selector (科室): hospital departments the user can consult.
    unit = gr.Dropdown(label="🩺科室", value='中医科', elem_id="units",
                       choices=["中医科", "内科", "外科", "妇产科", "儿科",
                                "五官科", "男科", "皮肤性病科", "传染科", "精神心理科",
                                "整形美容科", "营养科", "生殖中心", "麻醉医学科", "医学影像科",
                                "骨科", "肿瘤科", "急诊科", "检验科"])
    with gr.Row():
        # `value` is a callable that Gradio re-runs every 10 s (requires the queue
        # enabled below), so results written by the background thread appear here.
        output_box = gr.Markdown(value=fetch_result, every=10, label="分析")
    with gr.Row():
        image_input = gr.Image(type="filepath", label="上传图片")  # image upload component
        text_input = gr.Textbox(label="输入")  # text input box
    with gr.Row():
        submit_btn = gr.Button("🚀 确认")  # submit button
        clear_btn = gr.ClearButton([output_box, image_input, text_input], value="🗑️ 清空")  # clear button
    # Set up the event listeners
    submit_btn.click(main, inputs=[text_input, image_input, unit], outputs=output_box)
    gr.Markdown(LICENSE)

#gr.close_all()
iface.queue().launch(show_api=False)  # Launch the Gradio interface