eaglelandsonce committed on
Commit 6a3bf81 · verified · 1 Parent(s): 2d115e8

Update app.py

Files changed (1)
  1. app.py +238 -291
app.py CHANGED
@@ -1,321 +1,268 @@
- import io
  import os
- from typing import List, Tuple, Union

  import gradio as gr
  import nltk

- # -----------------------------------------------------------------------------
- # Force NLTK data into a local folder to avoid permissions/network issues
- # -----------------------------------------------------------------------------
- NLTK_DATA_DIR = os.path.join(os.path.dirname(__file__), "nltk_data")
- os.makedirs(NLTK_DATA_DIR, exist_ok=True)
- os.environ["NLTK_DATA"] = NLTK_DATA_DIR
- if NLTK_DATA_DIR not in nltk.data.path:
-     nltk.data.path.insert(0, NLTK_DATA_DIR)
-
- # Cover old/new resource names across recent NLTK releases
- NLTK_PACKAGES = [
-     # Tokenizers
-     "punkt", "punkt_tab",
-     # Stopwords / Lemmas
-     "stopwords", "wordnet", "omw-1.4",
-     # POS taggers (old and new english-specific)
-     "averaged_perceptron_tagger", "averaged_perceptron_tagger_eng",
-     # NE chunkers (old and new)
-     "maxent_ne_chunker", "maxent_ne_chunker_tab",
-     # Word lists used by NE chunker
-     "words",
- ]
-
- def ensure_nltk_resources() -> str:
-     msgs = []
-     for pkg in NLTK_PACKAGES:
-         try:
-             # idempotent; will skip if already present
-             ok = nltk.download(pkg, download_dir=NLTK_DATA_DIR, quiet=True)
-             msgs.append(f"OK: {pkg}" if ok else f"Skipped: {pkg}")
-         except Exception as e:
-             msgs.append(f"Failed {pkg}: {e}")
-     return " | ".join(msgs) if msgs else "Resources checked."
-
- # Import after setting up data path
- from nltk.tokenize import word_tokenize
  from nltk.corpus import stopwords
- from nltk.stem import PorterStemmer, WordNetLemmatizer
- from nltk import pos_tag
- from nltk.chunk import ne_chunk


- # -----------------------------------------------------------------------------
- # File reading helpers
- # -----------------------------------------------------------------------------
- def _read_bytes(path: str) -> bytes:
-     with open(path, "rb") as f:
-         return f.read()

- def _extract_from_docx_bytes(b: bytes) -> str:
-     try:
-         import docx  # python-docx
-     except ImportError:
-         return "ERROR: python-docx not installed. Add 'python-docx' to requirements.txt."
-     f = io.BytesIO(b)
-     doc = docx.Document(f)
-     return "\n".join(p.text for p in doc.paragraphs)
-
- def _extract_from_doc_bytes(b: bytes) -> str:
      """
-     Best-effort .doc (binary) support:
-     - If 'textract' is installed, use it.
-     - Otherwise, return a clear message telling the user to convert to .docx.
      """
-     try:
-         import textract  # optional
-     except Exception:
-         return ("ERROR: .doc files require optional dependency 'textract' "
-                 "and system tools. Either `pip install textract` or convert "
-                 "the file to .docx and try again.")
-     try:
-         text = textract.process(io.BytesIO(b))  # may still fail if system tools missing
-         return text.decode("utf-8", errors="replace")
-     except Exception as e:
-         return (f"ERROR: Could not extract text from .doc with textract: {e}. "
-                 "Please convert the file to .docx and try again.")

- def read_file(upload: Union[str, dict, "gr.File", None]) -> str:
      """
-     Reads text from Gradio's File input. Supports .txt, .docx, and (optionally) .doc.
-     Works if `upload` is a path (str), a dict, or a file-like with .name/.read().
      """
-     if upload is None:
-         return ""
-
-     # Normalize to name/path/bytes
-     name, path, content = None, None, None
-
-     if isinstance(upload, str):
-         path = upload
-         name = os.path.basename(path)
-         content = _read_bytes(path)
-     elif isinstance(upload, dict):
-         # gradio sometimes passes {'name': '/tmp/..', 'orig_name': 'foo.txt', ...}
-         path = upload.get("name") or upload.get("path")
-         name = upload.get("orig_name") or (os.path.basename(path) if path else "")
-         if path and os.path.exists(path):
-             content = _read_bytes(path)
-     else:
-         # file-like
-         name = getattr(upload, "name", "") or ""
-         path = getattr(upload, "name", None)
-         try:
-             if path and os.path.exists(path):
-                 content = _read_bytes(path)
-             else:
-                 content = upload.read()
-         except Exception:
-             if path and os.path.exists(path):
-                 content = _read_bytes(path)

-     if not name:
-         name = "(uploaded)"
-     if content is None:
-         return "ERROR: Could not read uploaded file."

-     ext = os.path.splitext(name)[1].lower()

-     if ext == ".txt":
-         # try common encodings
-         for enc in ("utf-8", "utf-16", "latin-1"):
-             try:
-                 return content.decode(enc)
-             except UnicodeDecodeError:
-                 continue
-         return "ERROR: Could not decode text file. Try UTF-8/plain text."
-
-     if ext == ".docx":
-         return _extract_from_docx_bytes(content)
-
-     if ext == ".doc":
-         return _extract_from_doc_bytes(content)
-
-     return f"Unsupported file type: {ext}. Please upload .txt, .docx, or .doc."
-
-
- # -----------------------------------------------------------------------------
- # NLP helpers
- # -----------------------------------------------------------------------------
- def extract_ner(ne_tree) -> List[Tuple[str, str]]:
-     entities = []
-     for subtree in ne_tree:
-         if hasattr(subtree, "label"):
-             label = subtree.label()
-             text = " ".join(token for token, _ in subtree.leaves())
-             entities.append((text, label))
-     return entities
-
- def process_text(raw_text: str, steps: List[str]) -> str:
-     if not raw_text or raw_text.strip() == "":
-         return "⚠️ No text provided."
-
-     # Ensure data locally (quiet)
-     ensure_nltk_resources()
-
-     report_lines = []
-     text = raw_text
-
-     # 1) Tokenize (required by later steps)
-     tokens = None
-     if "Tokenize text." in steps or any(
-         s in steps for s in [
-             "Remove stopwords.", "Stem words.", "Lemmatize words.",
-             "Tag parts of speech.", "Extract named entities."
-         ]
-     ):
-         tokens = word_tokenize(text)
-         if "Tokenize text." in steps:
-             report_lines.append("### Tokens")
-             report_lines.append(f"`{tokens}`\n")
-
-     # 2) Stopwords
-     filtered_tokens = tokens
-     if "Remove stopwords." in steps:
-         sw = set(stopwords.words("english"))
-         filtered_tokens = [w for w in (tokens or []) if w.lower() not in sw]
-         report_lines.append("### After Stopword Removal")
-         report_lines.append(f"`{filtered_tokens}`\n")
-
-     # 3) Stemming
-     stemmed_tokens = filtered_tokens
-     if "Stem words." in steps:
-         stemmer = PorterStemmer()
-         stemmed_tokens = [stemmer.stem(w) for w in (filtered_tokens or [])]
-         report_lines.append("### Stemmed Tokens (Porter)")
-         report_lines.append(f"`{stemmed_tokens}`\n")
-
-     # 4) Lemmatization
-     lemmatized_tokens = stemmed_tokens if stemmed_tokens is not None else filtered_tokens
-     if "Lemmatize words." in steps:
-         lemmatizer = WordNetLemmatizer()
-         lemmatized_tokens = [lemmatizer.lemmatize(w) for w in (filtered_tokens or [])]
-         report_lines.append("### Lemmatized Tokens (WordNet)")
-         report_lines.append(f"`{lemmatized_tokens}`\n")
-
-     # 5) POS Tagging
-     pos_tags_val = None
-     if "Tag parts of speech." in steps or "Extract named entities." in steps:
-         base_for_tagging = lemmatized_tokens if lemmatized_tokens is not None else (tokens or [])
-         pos_tags_val = pos_tag(base_for_tagging)
-         if "Tag parts of speech." in steps:
-             report_lines.append("### Part-of-Speech Tags")
-             rows = ["| Token | POS |", "|---|---|"]
-             rows += [f"| {t} | {p} |" for (t, p) in pos_tags_val]
-             report_lines.append("\n".join(rows) + "\n")
-
-     # 6) NER
-     if "Extract named entities." in steps:
-         if not pos_tags_val:
-             base_for_tagging = lemmatized_tokens if lemmatized_tokens is not None else (tokens or [])
-             pos_tags_val = pos_tag(base_for_tagging)
-         ne_tree = ne_chunk(pos_tags_val, binary=False)
-         ner_pairs = extract_ner(ne_tree)
-
-         report_lines.append("### Named Entities")
-         if ner_pairs:
-             rows = ["| Entity | Label |", "|---|---|"]
-             rows += [f"| {ent} | {lbl} |" for (ent, lbl) in ner_pairs]
-             report_lines.append("\n".join(rows) + "\n")
-         else:
-             report_lines.append("_No named entities found._\n")
-
-     return "\n".join(report_lines).strip() or "No steps selected."
-
-
- # -----------------------------------------------------------------------------
- # Gradio UI
- # -----------------------------------------------------------------------------
- MENU = [
-     "Install and download required resources.",
-     "Tokenize text.",
-     "Remove stopwords.",
-     "Stem words.",
-     "Lemmatize words.",
-     "Tag parts of speech.",
-     "Extract named entities.",
- ]
-
- DEFAULT_TEXT = (
-     "NLTK is a powerful library for text processing. "
-     "Barack Obama served as the 44th President of the United States and lived in Washington, D.C."
- )
-
- with gr.Blocks(title="NLTK Text Processing Toolkit") as demo:
-     gr.Markdown("# NLTK Text Processing Toolkit")
      gr.Markdown(
-         "Type or paste text, or drop a `.txt`/`.docx`/`.doc` file. "
-         "Select steps and click **Process**. Use **Install/Download Resources** first if needed."
      )

      with gr.Row():
-         with gr.Column():
-             text_in = gr.Textbox(
-                 label="Text Input",
-                 lines=10,
-                 value=DEFAULT_TEXT,
-                 placeholder="Type or paste text here..."
-             )
-             file_in = gr.File(
-                 label="...or drop a .txt / .docx / .doc file",
-                 file_types=[".txt", ".docx", ".doc"]
-             )
-             steps_in = gr.CheckboxGroup(
-                 choices=MENU,
-                 value=[
-                     "Tokenize text.",
-                     "Remove stopwords.",
-                     "Lemmatize words.",
-                     "Tag parts of speech.",
-                     "Extract named entities.",
-                 ],
-                 label="Menu (choose one or more)"
-             )
-             with gr.Row():
-                 install_btn = gr.Button("Install/Download Resources")
-                 process_btn = gr.Button("Process", variant="primary")
-                 clear_btn = gr.Button("Clear")

-         with gr.Column():
-             status_out = gr.Textbox(label="Status / Logs", interactive=False)
-             result_out = gr.Markdown(label="Results")

-     # Button callbacks
-     def on_install():
-         try:
-             return ensure_nltk_resources()
-         except Exception as e:
-             return f"Install error: {e}"

-     def on_process(text, file, steps):
-         try:
-             text = (text or "").strip()
-             file_text = read_file(file) if file is not None else ""
-             if not text and file_text:
-                 text = file_text

-             if file_text.startswith("ERROR:") or file_text.startswith("Unsupported file type:"):
-                 return file_text

-             return process_text(text, steps or [])
          except Exception:
-             import traceback
-             return "### Error\n```\n" + "".join(traceback.format_exc()) + "\n```"
-
-     def on_clear():
-         return "", ""
-
-     install_btn.click(fn=on_install, inputs=None, outputs=status_out)
-     process_btn.click(fn=on_process, inputs=[text_in, file_in, steps_in], outputs=result_out)
-     clear_btn.click(fn=on_clear, inputs=None, outputs=[status_out, result_out])

  if __name__ == "__main__":
-     # If you need external access, set server_name="0.0.0.0"
      demo.launch()
 
 
  import os
+ from collections import Counter
+ from typing import List, Tuple, Dict

  import gradio as gr
  import nltk

+ # Ensure NLTK resources are available at startup (safe to call repeatedly)
+ def _ensure_nltk():
+     try:
+         nltk.data.find("tokenizers/punkt")
+     except LookupError:
+         nltk.download("punkt", quiet=True)
+     try:
+         nltk.data.find("corpora/stopwords")
+     except LookupError:
+         nltk.download("stopwords", quiet=True)
+
+ _ensure_nltk()
+
+ from nltk.tokenize import sent_tokenize, word_tokenize
  from nltk.corpus import stopwords

+ # ---------- Helpers ----------

+ def read_text_input(text: str, file_obj) -> str:
+     """
+     Priority: if a file is provided, read it; otherwise use text box.
+     Supports .txt and .docx (not legacy .doc).
+     """
+     if file_obj is not None:
+         path = file_obj.name if hasattr(file_obj, "name") else str(file_obj)
+         ext = os.path.splitext(path)[1].lower()
+         if ext == ".txt":
+             with open(path, "r", encoding="utf-8", errors="ignore") as f:
+                 return f.read()
+         elif ext == ".docx":
+             try:
+                 from docx import Document
+             except Exception as e:
+                 return f"ERROR: python-docx not installed or failed to import: {e}"
+             try:
+                 doc = Document(path)
+                 return "\n".join(p.text for p in doc.paragraphs)
+             except Exception as e:
+                 return f"ERROR reading .docx: {e}"
+         else:
+             return "ERROR: Unsupported file type. Please upload .txt or .docx."
+     return text or ""

+
+ def preprocess_tokens(tokens: List[str], clean: bool) -> List[str]:
      """
+     Optionally lowercases and removes English stopwords.
+     Leaves punctuation/nums as-is (tokenizer keeps them); the Bag of Words
+     will reflect exactly what remains after stopword filtering.
      """
+     if not clean:
+         return tokens
+     stops = set(stopwords.words("english"))
+     return [t.lower() for t in tokens if t.lower() not in stops]

+
+ def tokenize_pipeline(
+     raw_text: str, clean: bool
+ ) -> Tuple[List[str], List[List[str]], Counter, List[str]]:
      """
+     - Split text into sentences
+     - Tokenize each sentence into words
+     - (Optionally) lower + remove stopwords
+     - Build Bag of Words across the full text
+     Returns: sentences, tokenized_sentences, bow_counter, vocabulary_list
      """
+     if not raw_text.strip():
+         return [], [], Counter(), []

+     sentences = sent_tokenize(raw_text)
+     tokenized_sentences = []
+     for s in sentences:
+         tokens = word_tokenize(s)
+         tokens = preprocess_tokens(tokens, clean=clean)
+         tokenized_sentences.append(tokens)

+     all_words = [w for sent in tokenized_sentences for w in sent]
+     bow = Counter(all_words)
+     vocabulary = sorted(bow.keys())
+     return sentences, tokenized_sentences, bow, vocabulary

+
+ def build_sentence_vector(
+     tokenized_sentences: List[List[str]], vocabulary: List[str], idx: int
+ ) -> Dict[str, int]:
+     """
+     Count occurrences of each vocab term inside the selected sentence.
+     Returns a {word: count} mapping (only non-zero entries for clarity).
+     """
+     if not tokenized_sentences or not vocabulary:
+         return {}
+
+     if idx < 0 or idx >= len(tokenized_sentences):
+         return {}
+
+     sent_tokens = tokenized_sentences[idx]
+     counts = Counter(sent_tokens)
+     vector = {word: counts[word] for word in vocabulary if counts[word] > 0}
+     return dict(sorted(vector.items(), key=lambda kv: (-kv[1], kv[0])))
+
+
+ # ---------- Gradio App ----------
+
+ SAMPLE_TEXT = """NLTK is a powerful library for text processing.
+ Text processing is essential for NLP tasks.
+ Bag of Words is a fundamental concept in NLP.
+ Tokenization splits sentences into words.
+ We can count word occurrences in text.
+ Word frequency vectors represent sentences numerically.
+ Vectorization helps in transforming text for machine learning.
+ Machine learning models can use BOW as input.
+ NLP tasks include classification and sentiment analysis.
+ Word frequency counts provide insight into text structure.
+ """
+
+ with gr.Blocks(title="NLTK: Tokenize Bag of Words → Sentence Vector") as demo:
      gr.Markdown(
+         """
+ # NLTK Mini-Workbench
+ Type/paste text or drop a **.txt** / **.docx** file.
+ Then click **Process** to:
+ 1) Install NLTK (auto-checked at startup)
+ 2) Tokenize sentences into words
+ 3) Count word occurrences (Bag of Words)
+ 4) Build a word-frequency vector for any selected sentence
+
+ **Option:** Toggle *Stopword removal + lowercasing* to get a cleaner Bag of Words.
+ > Note: Legacy `.doc` files are not supported—please convert to `.docx`.
+ """
      )

      with gr.Row():
+         text_in = gr.Textbox(
+             label="Input Text",
+             value=SAMPLE_TEXT,
+             lines=12,
+             placeholder="Paste text here, or upload a file instead...",
+         )
+         file_in = gr.File(
+             label="Or upload a file (.txt or .docx)",
+             file_types=[".txt", ".docx"],
+             type="filepath",
+         )
+
+     clean_opt = gr.Checkbox(
+         label="Stopword removal + lowercasing",
+         value=True,
+         info='Removes common English stopwords (e.g., "is", "for", "the") and lowercases tokens.',
+     )

+     process_btn = gr.Button("Process", variant="primary")

+     # Hidden state to carry processed artifacts between events
+     st_sentences = gr.State([])
+     st_tokenized = gr.State([])
+     st_vocab = gr.State([])

+     with gr.Row():
+         sentence_dropdown = gr.Dropdown(
+             choices=[],
+             label="Select a sentence to vectorize",
+             interactive=True,
+         )
+
+     with gr.Tab("Tokenized Sentences"):
+         tokenized_out = gr.JSON(label="Tokens per sentence")
+
+     with gr.Tab("Bag of Words"):
+         bow_df = gr.Dataframe(
+             headers=["word", "count"],
+             label="Bag of Words (sorted by count desc)",
+             interactive=False,
+             wrap=True,
+         )
+
+     with gr.Tab("Sentence Vector"):
+         vec_df = gr.Dataframe(
+             headers=["word", "count"],
+             label="Word-frequency vector for selected sentence",
+             interactive=False,
+             wrap=True,
+         )
+
+     # --------- Events ---------
+
+     def on_process(text, file, clean):
+         # Ensure required NLTK bits exist (esp. for fresh environments)
+         _ensure_nltk()
+
+         raw_text = read_text_input(text, file)
+         # If read_text_input returned an error string, surface it and reset the outputs
+         # (one value per output component: dropdown, JSON, BOW table, vector table, 3 states)
+         if raw_text.startswith("ERROR"):
+             return (
+                 gr.update(choices=[], value=None),
+                 {"error": raw_text},
+                 [],
+                 [],
+                 [],
+                 [],
+                 [],
+             )
+
+         sentences, tokenized_sentences, bow, vocab = tokenize_pipeline(raw_text, clean)
+
+         # Prepare UI artifacts
+         # Sentence dropdown: "1: <first 60 chars>"
+         dd_choices = [f"{i+1}: {s[:60].strip()}{'...' if len(s) > 60 else ''}" for i, s in enumerate(sentences)]
+         dd_value = dd_choices[0] if dd_choices else None
+
+         tokenized_json = {f"Sentence {i+1}": tokens for i, tokens in enumerate(tokenized_sentences)}
+         bow_rows = sorted(bow.items(), key=lambda kv: (-kv[1], kv[0]))
+
+         # Build initial vector for sentence 1 if available
+         vector_rows = []
+         if tokenized_sentences and vocab:
+             vec_map = build_sentence_vector(tokenized_sentences, vocab, 0)
+             vector_rows = [[w, c] for w, c in vec_map.items()]
+
+         return (
+             gr.update(choices=dd_choices, value=dd_value),
+             tokenized_json,
+             [[w, c] for w, c in bow_rows],
+             vector_rows,
+             sentences,
+             tokenized_sentences,
+             vocab,
+         )
+
+     process_btn.click(
+         fn=on_process,
+         inputs=[text_in, file_in, clean_opt],
+         outputs=[
+             sentence_dropdown,   # dropdown choices + value
+             tokenized_out,       # JSON tokens
+             bow_df,              # BOW table
+             vec_df,              # initial vector table
+             st_sentences,        # state: sentences
+             st_tokenized,        # state: tokenized sentences
+             st_vocab,            # state: vocabulary
+         ],
+     )

+     def on_select_sentence(choice: str, tokenized_sentences, vocabulary):
+         if not choice or not tokenized_sentences or not vocabulary:
+             return []
+         try:
+             # Choice looks like "3: <preview>"
+             idx = int(choice.split(":")[0]) - 1
          except Exception:
+             return []
+         vec_map = build_sentence_vector(tokenized_sentences, vocabulary, idx)
+         return [[w, c] for w, c in vec_map.items()]
+
+     sentence_dropdown.change(
+         fn=on_select_sentence,
+         inputs=[sentence_dropdown, st_tokenized, st_vocab],
+         outputs=[vec_df],
+     )

  if __name__ == "__main__":
+     # Launch on http://127.0.0.1:7860
  demo.launch()
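
The refactored helpers are plain module-level functions, so the new pipeline can be sanity-checked without launching the UI. A minimal sketch, assuming `app.py` is importable as a module and the `punkt`/`stopwords` data has already been fetched by `_ensure_nltk()`:

```python
# Usage sketch only (not part of the commit): exercises tokenize_pipeline and
# build_sentence_vector from app.py outside Gradio.
from app import tokenize_pipeline, build_sentence_vector

text = "NLTK is a powerful library. Bag of Words is a fundamental concept in NLP."
sentences, tokenized, bow, vocab = tokenize_pipeline(text, clean=True)

print(bow.most_common(3))                          # highest Bag-of-Words counts across the text
print(build_sentence_vector(tokenized, vocab, 1))  # word-frequency vector for the second sentence
```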