Roberta2024 committed
Commit 2755a07 · verified · 1 Parent(s): b29f979

Create app.py

Files changed (1): app.py (+149, -0)
app.py ADDED
@@ -0,0 +1,149 @@
+ import requests
+ from bs4 import BeautifulSoup
+ import pandas as pd
+ import jieba
+ from keybert import KeyBERT
+ from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
+ import streamlit as st
+ import matplotlib.pyplot as plt
+ from matplotlib.font_manager import FontProperties
+ from wordcloud import WordCloud
+ from gensim import corpora, models
+
+ # Download a font that can render Chinese characters
+ def download_font(url, save_path):
+     response = requests.get(url)
+     with open(save_path, 'wb') as f:
+         f.write(response.content)
+
+ # Font URL and save path
+ font_url = 'https://drive.google.com/uc?id=1eGAsTN1HBpJAkeVM57_C7ccp7hbgSz3_&export=download'
+ font_path = 'TaipeiSansTCBeta-Regular.ttf'
+
+ # Download the font
+ download_font(font_url, font_path)
+
+ # Set up the font for matplotlib
+ font_prop = FontProperties(fname=font_path)
+
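+ # Note: Streamlit re-runs this entire script on every widget interaction,
+ # so the font is re-downloaded on each rerun. A minimal guard (a sketch,
+ # assuming `import os` is added above) would cache it on disk:
+ #     if not os.path.exists(font_path):
+ #         download_font(font_url, font_path)
+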
+ # Fetch the title and body of a Yahoo News article
+ def fetch_yahoo_news(url):
+     response = requests.get(url)
+     web_content = response.content
+     soup = BeautifulSoup(web_content, 'html.parser')
+     title = soup.find('h1').text
+     content = soup.find('article').text
+     return title, content
+
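+ # Note: soup.find() returns None when the page lacks an <h1> or <article>
+ # (consent pages, layout changes), so the lookups above can raise
+ # AttributeError. A more defensive variant (a sketch) would be:
+ #     response.raise_for_status()
+ #     h1 = soup.find('h1')
+ #     title = h1.text if h1 else ''
+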
+ # Tokenizer using jieba word segmentation
+ def jieba_tokenizer(text):
+     return jieba.lcut(text)
+
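+ # Example: jieba.lcut('自然語言處理') returns a token list such as
+ # ['自然', '語言', '處理'] (the exact split depends on jieba's dictionary
+ # and version).
+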
+ # Initialize the CountVectorizer and the KeyBERT model
+ vectorizer = CountVectorizer(tokenizer=jieba_tokenizer)
+ kw_model = KeyBERT()
+
+ # Keyword extraction function (using MMR)
+ def extract_keywords(doc, diversity=0.7):
+     keywords = kw_model.extract_keywords(doc, vectorizer=vectorizer, use_mmr=True, diversity=diversity)
+     return keywords
+
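+ # MMR (maximal marginal relevance) trades relevance against redundancy:
+ # diversity=0 is plain similarity ranking, while values near 1 favour
+ # mutually dissimilar keywords, e.g. extract_keywords(content, diversity=0.9)
+ # for maximum spread. The result is a list of (keyword, score) tuples.
+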
+ # Plot keyword scores as a horizontal bar chart
+ def plot_keywords(keywords, title):
+     words = [kw[0] for kw in keywords]
+     scores = [kw[1] for kw in keywords]
+
+     plt.figure(figsize=(10, 6))
+     bars = plt.barh(words, scores, color='skyblue', edgecolor='black', linewidth=1.2)
+     plt.xlabel('Score', fontproperties=font_prop, fontsize=14)
+     plt.title(title, fontproperties=font_prop, fontsize=16)
+     plt.gca().invert_yaxis()  # Invert the Y axis so the highest-scoring keyword is on top
+     plt.xticks(fontproperties=font_prop, fontsize=12)
+     plt.yticks(fontproperties=font_prop, fontsize=12)
+     plt.grid(axis='x', linestyle='--', alpha=0.7)
+
+     # Add score labels
+     for bar in bars:
+         plt.gca().text(bar.get_width() + 0.01, bar.get_y() + bar.get_height() / 2,
+                        f'{bar.get_width():.4f}', va='center', ha='left', fontsize=12, fontproperties=font_prop)
+
+     st.pyplot(plt)
+
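+ # Note: st.pyplot(plt) renders the current global figure. Passing the
+ # Figure object explicitly (fig = plt.figure(...); ...; st.pyplot(fig))
+ # and then plt.close(fig) avoids relying on matplotlib's global state,
+ # which Streamlit discourages.
+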
+ # Generate a TF-IDF word cloud
+ def plot_wordcloud(text):
+     tfidf_vectorizer = TfidfVectorizer(tokenizer=jieba_tokenizer)
+     tfidf_matrix = tfidf_vectorizer.fit_transform([text])
+     tfidf_scores = dict(zip(tfidf_vectorizer.get_feature_names_out(), tfidf_matrix.toarray().flatten()))
+
+     wordcloud = WordCloud(font_path=font_path, background_color='white', max_words=100, width=800, height=400)
+     wordcloud.generate_from_frequencies(tfidf_scores)
+
+     plt.figure(figsize=(10, 6))
+     plt.imshow(wordcloud, interpolation='bilinear')
+     plt.axis('off')
+     plt.title('TF-IDF Word Cloud', fontproperties=font_prop, fontsize=16)
+     st.pyplot(plt)
+
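+ # Note: with a single-document corpus, every term in the vocabulary gets
+ # the same idf, so these "TF-IDF" weights reduce to normalized term
+ # frequencies; fitting on several articles at once would make the idf
+ # component meaningful.
+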
+ # LDA topic modeling function
+ def lda_topic_modeling(text, num_topics=5):
+     # Tokenize, then build the dictionary and corpus
+     tokens = jieba_tokenizer(text)
+     dictionary = corpora.Dictionary([tokens])
+     corpus = [dictionary.doc2bow(tokens)]
+
+     # Train the LDA model
+     lda_model = models.LdaModel(corpus, num_topics=num_topics, id2word=dictionary, passes=15)
+
+     # Extract the topics
+     topics = lda_model.print_topics(num_words=5)
+     return topics
+
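+ # Note: a one-document corpus gives LDA nothing to contrast, so all topics
+ # are fit to the same bag of words. Splitting the article into paragraphs
+ # as pseudo-documents (a sketch) would give the model more to work with:
+ #     docs = [jieba_tokenizer(p) for p in text.split('\n') if p.strip()]
+ #     dictionary = corpora.Dictionary(docs)
+ #     corpus = [dictionary.doc2bow(d) for d in docs]
+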
+ # Build the Streamlit web app
+ st.title("🤙🤙🤙YAHOO News Keyword Extraction Tool👂👂")
+
+ # MMR diversity parameter
+ diversity = st.slider("Select the MMR diversity parameter", 0.0, 1.0, 0.7)
+
+ # Number of LDA topics (kept outside the button block: widgets created
+ # inside it vanish and reset the page on the rerun their change triggers)
+ num_topics = st.slider("Select the number of LDA topics", 1, 10, 5)
+
+ # Input box for the Yahoo News URL
+ url = st.text_input("Enter a Yahoo News URL:")
+
+ if st.button("Fetch and extract keywords"):
+     if url:
+         title, content = fetch_yahoo_news(url)
+         st.write("News title:", title)
+         st.write("News content:", content)
+
+         # Put the content into a DataFrame
+         data = {'Title': [title], 'Content': [content]}
+         df = pd.DataFrame(data)
+         st.write("DataFrame of the news content:")
+         st.write(df)
+
+         # Extract keywords
+         keywords = extract_keywords(content, diversity=diversity)
+         st.write("Keyword extraction results:")
+         for keyword in keywords:
+             st.write(f"{keyword[0]}: {keyword[1]:.4f}")
+
+         plot_keywords(keywords, "Keyword extraction results")
+
+         # Keyword extraction with a second, multilingual model
+         kw_model_multilingual = KeyBERT(model='distiluse-base-multilingual-cased-v1')
+         keywords_multilingual = kw_model_multilingual.extract_keywords(content, vectorizer=vectorizer, use_mmr=True, diversity=diversity)
+         st.write("Multilingual model keyword extraction results:")
+         for keyword in keywords_multilingual:
+             st.write(f"{keyword[0]}: {keyword[1]:.4f}")
+
+         plot_keywords(keywords_multilingual, "Multilingual model keyword extraction results")
+
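+         # Note: this reloads the sentence-transformers model on every button
+         # press. Caching it (a sketch using Streamlit's resource cache) would
+         # avoid the repeated load:
+         #     @st.cache_resource
+         #     def get_multilingual_model():
+         #         return KeyBERT(model='distiluse-base-multilingual-cased-v1')
+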
+         # Generate the TF-IDF word cloud
+         plot_wordcloud(content)
+
+         # LDA topic modeling (num_topics comes from the slider defined above)
+         lda_topics = lda_topic_modeling(content, num_topics=num_topics)
+         st.write("LDA topic modeling results:")
+         for topic in lda_topics:
+             st.write(f"Topic {topic[0]}: {topic[1]}")
+     else:
+         st.write("Please enter a valid Yahoo News URL.")