zzh2004 committed on
Commit 8baa4ba · verified · 1 Parent(s): cdc5248

Upload folder using huggingface_hub

Files changed (6)
  1. README.md +2 -8
  2. demo.ipynb +47 -0
  3. demo.py +9 -0
  4. flagged/log.csv +3 -0
  5. requirements.txt +3 -0
  6. testvalble.py +2 -0
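The commit message says the folder was pushed with huggingface_hub. A minimal sketch of such an upload, assuming the standard HfApi.upload_folder call (the local path and Space id below are placeholders, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()
    # Push every file in the local folder to the Space in a single commit
    api.upload_folder(
        folder_path="./test",            # placeholder: local folder with demo.py, README.md, etc.
        repo_id="zzh2004/test",          # placeholder Space id
        repo_type="space",
        commit_message="Upload folder using huggingface_hub",
    )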
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title: Test
-emoji: 🐢
-colorFrom: red
-colorTo: red
+title: test
+app_file: demo.py
 sdk: gradio
 sdk_version: 4.40.0
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
demo.ipynb ADDED
@@ -0,0 +1,47 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "f:\\miniconda\\envs\\transformers\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      "  from .autonotebook import tqdm as notebook_tqdm\n",
+      "f:\\miniconda\\envs\\transformers\\lib\\site-packages\\transformers\\deepspeed.py:24: FutureWarning: transformers.deepspeed module is deprecated and will be removed in a future version. Please import deepspeed modules directly from transformers.integrations\n",
+      "  warnings.warn(\n",
+      "f:\\miniconda\\envs\\transformers\\lib\\site-packages\\torchaudio\\backend\\utils.py:62: UserWarning: No audio backend is available.\n",
+      "  warnings.warn(\"No audio backend is available.\")\n"
+     ]
+    }
+   ],
+   "source": [
+    "from transformers import *"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "transformers",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.19"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
demo.py ADDED
@@ -0,0 +1,9 @@
+import gradio as gr
+from transformers import pipeline
+
+# Create a pipeline object and run it on the GPU (device index 0)
+qa_pipeline = pipeline("question-answering", model="uer/roberta-base-chinese-extractive-qa", device=0)
+
+# Build a Gradio interface from the pipeline and launch it
+interface = gr.Interface.from_pipeline(qa_pipeline)
+interface.launch(share=True, server_name="0.0.0.0", server_port=8911)
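For reference, the pipeline that gr.Interface.from_pipeline wraps takes a context/question pair and returns an extracted answer span with a score. A minimal sketch of calling it directly, assuming CPU execution (device=-1) and illustrative input strings borrowed from flagged/log.csv:

    from transformers import pipeline

    # Same model as demo.py, but on CPU for a quick local check
    qa = pipeline("question-answering", model="uer/roberta-base-chinese-extractive-qa", device=-1)

    result = qa(question="中国的首都是?", context="中国的首都是北京")
    print(result)  # dict with 'score', 'start', 'end', 'answer' (e.g. answer='北京')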
flagged/log.csv ADDED
@@ -0,0 +1,3 @@
+Context,Question,Answer,Score,flag,username,timestamp
+中国的首都是北京,中国的是首都是?,,"{""label"": null, ""confidences"": null}",,,2024-08-07 17:25:25.619859
+我是人,我是?,,"{""label"": null, ""confidences"": null}",,,2024-08-07 17:47:36.671893
requirements.txt ADDED
@@ -0,0 +1,3 @@
+y
+y
+y
testvalble.py ADDED
@@ -0,0 +1,2 @@
+import torch
+print(torch.__version__)
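If the point of this check is to confirm the GPU setup that demo.py's device=0 relies on (an assumption, not stated in the commit), a slightly fuller probe could be:

    import torch

    print(torch.__version__)
    # device=0 in demo.py requires a visible CUDA-capable GPU
    print(torch.cuda.is_available())
    if torch.cuda.is_available():
        print(torch.cuda.get_device_name(0))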