"""keyword_extraction

Streamlit app: fetch a Yahoo News article, extract its keywords with KeyBERT
(tokenizing the Chinese text with jieba), and plot the keyword scores.
"""
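
# Dependencies (pip package names): streamlit, requests, beautifulsoup4, pandas,
# jieba, keybert, scikit-learn, matplotlib. Run with: streamlit run <this_file>.py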
import requests
from bs4 import BeautifulSoup
import pandas as pd
import jieba
from keybert import KeyBERT
from sklearn.feature_extraction.text import CountVectorizer
import streamlit as st
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
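

# Download a file over HTTP and save it to disk; used below to fetch a font
# that can render Chinese characters in matplotlib.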
def download_font(url, save_path):
    response = requests.get(url)
    with open(save_path, 'wb') as f:
        f.write(response.content)
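

# Taipei Sans TC Beta covers Traditional Chinese glyphs, which matplotlib's
# default fonts cannot display.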
font_url = 'https://drive.google.com/uc?id=1eGAsTN1HBpJAkeVM57_C7ccp7hbgSz3_&export=download'
font_path = 'TaipeiSansTCBeta-Regular.ttf'

download_font(font_url, font_path)

font_prop = FontProperties(fname=font_path)
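

# Scrape a Yahoo News article page: the title is taken from the first <h1> tag
# and the body text from the <article> tag (assumes the page uses that markup).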
def fetch_yahoo_news(url):
    response = requests.get(url)
    web_content = response.content
    soup = BeautifulSoup(web_content, 'html.parser')
    title = soup.find('h1').text
    content = soup.find('article').text
    return title, content
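

# KeyBERT's default vectorizer splits on whitespace, which does not segment
# Chinese text; use jieba word segmentation as the tokenizer instead.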
def jieba_tokenizer(text):
    return jieba.lcut(text)
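

# Candidate keywords come from the jieba-based CountVectorizer; KeyBERT() with
# no arguments loads its default (English-oriented) sentence-transformer model.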
vectorizer = CountVectorizer(tokenizer=jieba_tokenizer)
kw_model = KeyBERT()


def extract_keywords(doc):
    keywords = kw_model.extract_keywords(doc, vectorizer=vectorizer)
    return keywords
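

# Plot keyword scores as a horizontal bar chart ('分數' = "score"), labelling
# each bar with its value and using the downloaded font for the Chinese text.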
def plot_keywords(keywords, title):
    words = [kw[0] for kw in keywords]
    scores = [kw[1] for kw in keywords]

    fig = plt.figure(figsize=(10, 6))
    bars = plt.barh(words, scores, color='skyblue', edgecolor='black', linewidth=1.2)
    plt.xlabel('分數', fontproperties=font_prop, fontsize=14)
    plt.title(title, fontproperties=font_prop, fontsize=16)
    plt.gca().invert_yaxis()
    plt.xticks(fontproperties=font_prop, fontsize=12)
    plt.yticks(fontproperties=font_prop, fontsize=12)
    plt.grid(axis='x', linestyle='--', alpha=0.7)

    for bar in bars:
        plt.gca().text(bar.get_width() + 0.01, bar.get_y() + bar.get_height() / 2,
                       f'{bar.get_width():.4f}', va='center', ha='left',
                       fontsize=12, fontproperties=font_prop)

    # Pass the explicit figure to Streamlit instead of the global pyplot state,
    # then close it so repeated runs do not accumulate open figures.
    st.pyplot(fig)
    plt.close(fig)
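

# Streamlit UI: paste a Yahoo News URL, click the button, and the app shows the
# article, a one-row DataFrame, the extracted keywords, and their bar charts.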
st.title("🤙🤙🤙YAHOO新聞關鍵詞提取工具👂👂")  # "Yahoo News keyword extraction tool"

url = st.text_input("請輸入Yahoo新聞的URL:")  # "Enter a Yahoo News URL:"

if st.button("抓取並提取關鍵詞"):  # "Fetch and extract keywords"
    if url:
        title, content = fetch_yahoo_news(url)
        st.write("新聞標題:", title)  # "News title:"
        st.write("新聞內容:", content)  # "News content:"

        data = {'Title': [title], 'Content': [content]}
        df = pd.DataFrame(data)
        st.write("新聞內容的DataFrame:")  # "DataFrame of the news content:"
        st.write(df)

        keywords = extract_keywords(content)
        st.write("關鍵詞提取結果:")  # "Keyword extraction results:"
        for keyword in keywords:
            st.write(f"{keyword[0]}: {keyword[1]:.4f}")

        plot_keywords(keywords, "關鍵詞提取結果")
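
        # Second pass with an explicitly multilingual sentence-transformers model,
        # which should handle Chinese text better than KeyBERT's English default.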
        kw_model_multilingual = KeyBERT(model='distiluse-base-multilingual-cased-v1')
        keywords_multilingual = kw_model_multilingual.extract_keywords(content, vectorizer=vectorizer)
        st.write("多語言模型關鍵詞提取結果:")  # "Multilingual model keyword extraction results:"
        for keyword in keywords_multilingual:
            st.write(f"{keyword[0]}: {keyword[1]:.4f}")

        plot_keywords(keywords_multilingual, "多語言模型關鍵詞提取結果")
    else:
        st.write("請輸入有效的Yahoo新聞URL。")  # "Please enter a valid Yahoo News URL."