Roberta2024 committed
Commit 201f8fa · verified · 1 Parent(s): 887d049

Create app.py

Files changed (1)
  1. app.py +108 -0
app.py ADDED
@@ -0,0 +1,108 @@
# -*- coding: utf-8 -*-
"""keyword_extraction"""

import requests
from bs4 import BeautifulSoup
import pandas as pd
import jieba
from keybert import KeyBERT
from sklearn.feature_extraction.text import CountVectorizer
import streamlit as st
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

# Download a CJK-capable font so matplotlib can render Chinese labels
def download_font(url, save_path):
    response = requests.get(url)
    with open(save_path, 'wb') as f:
        f.write(response.content)

# Font URL and local save path
font_url = 'https://drive.google.com/uc?id=1eGAsTN1HBpJAkeVM57_C7ccp7hbgSz3_&export=download'
font_path = 'TaipeiSansTCBeta-Regular.ttf'

# Download the font
download_font(font_url, font_path)

# Register the font for matplotlib
font_prop = FontProperties(fname=font_path)

# Fetch the title and body text of a Yahoo News article
def fetch_yahoo_news(url):
    response = requests.get(url)
    web_content = response.content
    soup = BeautifulSoup(web_content, 'html.parser')
    title = soup.find('h1').text
    content = soup.find('article').text
    return title, content

# Tokenizer: segment Chinese text with jieba
def jieba_tokenizer(text):
    return jieba.lcut(text)

# Initialize the CountVectorizer and define the KeyBERT model
vectorizer = CountVectorizer(tokenizer=jieba_tokenizer)
kw_model = KeyBERT()

# Keyword extraction function
def extract_keywords(doc):
    keywords = kw_model.extract_keywords(doc, vectorizer=vectorizer)
    return keywords

# Plotting function
def plot_keywords(keywords, title):
    words = [kw[0] for kw in keywords]
    scores = [kw[1] for kw in keywords]

    plt.figure(figsize=(10, 6))
    bars = plt.barh(words, scores, color='skyblue', edgecolor='black', linewidth=1.2)
    plt.xlabel('分數', fontproperties=font_prop, fontsize=14)
    plt.title(title, fontproperties=font_prop, fontsize=16)
    plt.gca().invert_yaxis()  # Invert the y-axis so the highest-scoring keyword appears on top
    plt.xticks(fontproperties=font_prop, fontsize=12)
    plt.yticks(fontproperties=font_prop, fontsize=12)
    plt.grid(axis='x', linestyle='--', alpha=0.7)

    # Add a score label next to each bar
    for bar in bars:
        plt.gca().text(bar.get_width() + 0.01, bar.get_y() + bar.get_height() / 2,
                       f'{bar.get_width():.4f}', va='center', ha='left', fontsize=12, fontproperties=font_prop)

    st.pyplot(plt)

# Build the Streamlit web app
st.title("中文關鍵詞提取工具")

# Input box for the Yahoo News URL
url = st.text_input("請輸入Yahoo新聞的URL:")

if st.button("抓取並提取關鍵詞"):
    if url:
        title, content = fetch_yahoo_news(url)
        st.write("新聞標題:", title)
        st.write("新聞內容:", content)

        # Wrap the article in a DataFrame
        data = {'Title': [title], 'Content': [content]}
        df = pd.DataFrame(data)
        st.write("新聞內容的DataFrame:")
        st.write(df)

        # Extract keywords with the default KeyBERT model
        keywords = extract_keywords(content)
        st.write("關鍵詞提取結果:")
        for keyword in keywords:
            st.write(f"{keyword[0]}: {keyword[1]:.4f}")

        plot_keywords(keywords, "關鍵詞提取結果")

        # Extract keywords again with a multilingual sentence-transformer model
        kw_model_multilingual = KeyBERT(model='distiluse-base-multilingual-cased-v1')
        keywords_multilingual = kw_model_multilingual.extract_keywords(content, vectorizer=vectorizer)
        st.write("多語言模型關鍵詞提取結果:")
        for keyword in keywords_multilingual:
            st.write(f"{keyword[0]}: {keyword[1]:.4f}")

        plot_keywords(keywords_multilingual, "多語言模型關鍵詞提取結果")
    else:
        st.write("請輸入有效的Yahoo新聞URL。")