Roberta2024 committed on
Commit defe86d · verified · 1 Parent(s): 619ab0f

Create app.py

Files changed (1): app.py +127 -0
app.py ADDED
@@ -0,0 +1,127 @@
import requests
from bs4 import BeautifulSoup
import pandas as pd
import jieba
from keybert import KeyBERT
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import streamlit as st
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from wordcloud import WordCloud

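# Dependencies: the imports above correspond to the pip packages requests,
# beautifulsoup4, pandas, jieba, keybert, scikit-learn, streamlit, matplotlib,
# and wordcloud.
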
# Download the font
def download_font(url, save_path):
    response = requests.get(url)
    with open(save_path, 'wb') as f:
        f.write(response.content)

# Font URL and save path
font_url = 'https://drive.google.com/uc?id=1eGAsTN1HBpJAkeVM57_C7ccp7hbgSz3_&export=download'
font_path = 'TaipeiSansTCBeta-Regular.ttf'

# Download the font
download_font(font_url, font_path)

# Set up the font for matplotlib
font_prop = FontProperties(fname=font_path)
28
+ # 抓取Yahoo新聞標題和內容
29
+ def fetch_yahoo_news(url):
30
+ response = requests.get(url)
31
+ web_content = response.content
32
+ soup = BeautifulSoup(web_content, 'html.parser')
33
+ title = soup.find('h1').text
34
+ content = soup.find('article').text
35
+ return title, content
36
+
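# Parsing assumption: the article page exposes its headline in the first <h1>
# and its body in an <article> tag; if either tag is missing, soup.find()
# returns None and the .text access above raises an AttributeError.
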
# Word-segmentation (tokenizer) function
def jieba_tokenizer(text):
    return jieba.lcut(text)

# Initialize CountVectorizer and define the KeyBERT model
vectorizer = CountVectorizer(tokenizer=jieba_tokenizer)
kw_model = KeyBERT()

# Keyword-extraction function (using MMR)
def extract_keywords(doc, diversity=0.7):
    keywords = kw_model.extract_keywords(doc, vectorizer=vectorizer, use_mmr=True, diversity=diversity)
    return keywords

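# Illustrative shape of the result (values are hypothetical): extract_keywords
# returns (keyword, score) pairs, by default KeyBERT's top 5, e.g.
#   [('半導體', 0.62), ('台積電', 0.57), ('晶圓代工', 0.44), ...]
# A higher `diversity` makes the MMR re-ranking prefer less similar keywords.
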
# Plotting function
def plot_keywords(keywords, title):
    words = [kw[0] for kw in keywords]
    scores = [kw[1] for kw in keywords]

    plt.figure(figsize=(10, 6))
    bars = plt.barh(words, scores, color='skyblue', edgecolor='black', linewidth=1.2)
    plt.xlabel('分數', fontproperties=font_prop, fontsize=14)
    plt.title(title, fontproperties=font_prop, fontsize=16)
    plt.gca().invert_yaxis()  # Invert the y-axis so the highest-scoring keyword is at the top
    plt.xticks(fontproperties=font_prop, fontsize=12)
    plt.yticks(fontproperties=font_prop, fontsize=12)
    plt.grid(axis='x', linestyle='--', alpha=0.7)

    # Add score labels
    for bar in bars:
        plt.gca().text(bar.get_width() + 0.01, bar.get_y() + bar.get_height() / 2,
                       f'{bar.get_width():.4f}', va='center', ha='left', fontsize=12, fontproperties=font_prop)

    st.pyplot(plt)

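# Note: st.pyplot(plt) renders whatever the current global pyplot figure is;
# building an explicit Figure (fig = plt.figure(); ...; st.pyplot(fig)) is the
# more robust pattern, but the module-level call works here.
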
# Function to generate a TF-IDF word cloud
def plot_wordcloud(text):
    tfidf_vectorizer = TfidfVectorizer(tokenizer=jieba_tokenizer)
    tfidf_matrix = tfidf_vectorizer.fit_transform([text])
    tfidf_scores = dict(zip(tfidf_vectorizer.get_feature_names_out(), tfidf_matrix.toarray().flatten()))

    wordcloud = WordCloud(font_path=font_path, background_color='white', max_words=100, width=800, height=400)
    wordcloud.generate_from_frequencies(tfidf_scores)

    plt.figure(figsize=(10, 6))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    plt.title('TF-IDF文字雲', fontproperties=font_prop, fontsize=16)
    st.pyplot(plt)

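# Caveat: TfidfVectorizer is fitted on a single document here, so the IDF term
# is identical for every word and the weights effectively reduce to normalized
# term frequencies; the cloud therefore mostly reflects raw word frequency.
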
# Build the Streamlit web app
st.title("🤙🤙🤙YAHOO新聞關鍵詞提取工具👂👂")

# Set the MMR diversity parameter
diversity = st.slider("選擇MMR多樣性參數", 0.0, 1.0, 0.7)

# Input box for the Yahoo News URL to fetch
url = st.text_input("請輸入Yahoo新聞的URL:")

if st.button("抓取並提取關鍵詞"):
    if url:
        title, content = fetch_yahoo_news(url)
        st.write("新聞標題:", title)
        st.write("新聞內容:", content)

        # Convert the content into a DataFrame
        data = {'Title': [title], 'Content': [content]}
        df = pd.DataFrame(data)
        st.write("新聞內容的DataFrame:")
        st.write(df)

        # Extract keywords
        keywords = extract_keywords(content, diversity=diversity)
        st.write("關鍵詞提取結果:")
        for keyword in keywords:
            st.write(f"{keyword[0]}: {keyword[1]:.4f}")

        plot_keywords(keywords, "關鍵詞提取結果")

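        # Note: KeyBERT's default embedding model is English-oriented
        # (all-MiniLM-L6-v2 in recent releases), which is presumably why a
        # multilingual sentence-transformer is tried as a second pass below.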
        # Extract keywords with another model
        kw_model_multilingual = KeyBERT(model='distiluse-base-multilingual-cased-v1')
        keywords_multilingual = kw_model_multilingual.extract_keywords(content, vectorizer=vectorizer, use_mmr=True, diversity=diversity)
        st.write("多語言模型關鍵詞提取結果:")
        for keyword in keywords_multilingual:
            st.write(f"{keyword[0]}: {keyword[1]:.4f}")

        plot_keywords(keywords_multilingual, "多語言模型關鍵詞提取結果")

        # Generate the TF-IDF word cloud
        plot_wordcloud(content)
    else:
        st.write("請輸入有效的Yahoo新聞URL。")
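Usage note: once the imported packages are installed, the app can be launched locally with `streamlit run app.py` (standard Streamlit usage); the commit itself adds only app.py, with no requirements file or run instructions.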