# -*- coding: utf-8 -*-
import os

import requests
from bs4 import BeautifulSoup
import jieba
from keybert import KeyBERT
from sklearn.feature_extraction.text import CountVectorizer
import streamlit as st
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

# Download a font that can render Chinese glyphs in matplotlib
def download_font(url, save_path):
    response = requests.get(url)
    response.raise_for_status()
    with open(save_path, 'wb') as f:
        f.write(response.content)

# Font URL and local save path
font_url = 'https://drive.google.com/uc?id=1eGAsTN1HBpJAkeVM57_C7ccp7hbgSz3_&export=download'
font_path = 'TaipeiSansTCBeta-Regular.ttf'

# Download the font only if it is missing: Streamlit reruns this script on
# every interaction, so an unconditional download would re-fetch the file
if not os.path.exists(font_path):
    download_font(font_url, font_path)

# Register the font for use in matplotlib labels, titles, and tick text
font_prop = FontProperties(fname=font_path)
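# Alternative sketch (an assumption, requires matplotlib >= 3.2): register the
# font globally so fontproperties need not be passed to every call:
#   from matplotlib import font_manager
#   font_manager.fontManager.addfont(font_path)
#   plt.rcParams['font.family'] = font_prop.get_name()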

# Tokenizer: jieba segments Chinese text into words, which CountVectorizer's
# default regex tokenizer cannot do
def jieba_tokenizer(text):
    return jieba.lcut(text)
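# Example from jieba's README (simplified Chinese, default accurate mode):
#   jieba.lcut('我来到北京清华大学') -> ['我', '来到', '北京', '清华大学']
# Exact segmentation of traditional Chinese depends on jieba's dictionary.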

# CountVectorizer generates candidate keywords using the jieba tokenizer;
# token_pattern=None silences the warning that the default regex is unused
vectorizer = CountVectorizer(tokenizer=jieba_tokenizer, token_pattern=None)

# Cache the KeyBERT model so Streamlit does not reload it on every rerun;
# 'all-MiniLM-L6-v2' is KeyBERT's default embedding model
@st.cache_resource
def load_kw_model(model_name='all-MiniLM-L6-v2'):
    return KeyBERT(model=model_name)

kw_model = load_kw_model()

# Extract keywords: KeyBERT embeds the document and each candidate word,
# then ranks candidates by cosine similarity to the document embedding
def extract_keywords(doc):
    keywords = kw_model.extract_keywords(doc, vectorizer=vectorizer)
    return keywords
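# extract_keywords returns (keyword, score) tuples sorted by score,
# e.g. [('關鍵詞', 0.71), ...] (illustrative values only); by default
# KeyBERT returns the top 5 candidates.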

# Plot keyword scores as a horizontal bar chart
def plot_keywords(keywords, title):
    words = [kw[0] for kw in keywords]
    scores = [kw[1] for kw in keywords]

    fig = plt.figure(figsize=(10, 6))
    plt.barh(words, scores, color='skyblue')
    plt.xlabel('Score', fontproperties=font_prop)
    plt.title(title, fontproperties=font_prop)
    plt.gca().invert_yaxis()  # put the highest-scoring keyword on top
    plt.xticks(fontproperties=font_prop)
    plt.yticks(fontproperties=font_prop)
    st.pyplot(fig)  # pass the figure explicitly; st.pyplot(plt) is deprecated
    plt.close(fig)

# Web-scraping helper: fetch an article's title and body text
def fetch_article(url):
    response = requests.get(url)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')
    title_tag = soup.find('h1')
    title = title_tag.get_text(strip=True) if title_tag else '(no title found)'
    content_paragraphs = soup.find_all('p')
    content = ' '.join(para.get_text() for para in content_paragraphs)
    return title, content
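# Note: grabbing the first <h1> and all <p> tags matches typical Yahoo News
# article markup, but this is an assumption; other sites (or a redesign) may
# require different selectors.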

# Streamlit application UI
st.title("Chinese Keyword Extraction Tool")

url = st.text_input("Enter the URL of a Yahoo News article:")
if url:
    title, content = fetch_article(url)
    st.write("Article title:", title)
    st.write("Article content:", content)

    if st.button("Extract Keywords"):
        keywords = extract_keywords(content)
        st.write("Keyword extraction results:")
        for keyword in keywords:
            st.write(f"{keyword[0]}: {keyword[1]:.4f}")

        plot_keywords(keywords, "Keyword Extraction Results")

        # Extract keywords again with a multilingual sentence-transformer model,
        # reusing the cached loader so the model is only loaded once
        kw_model_multilingual = load_kw_model('distiluse-base-multilingual-cased-v1')
        keywords_multilingual = kw_model_multilingual.extract_keywords(content, vectorizer=vectorizer)
        st.write("Multilingual model keyword extraction results:")
        for keyword in keywords_multilingual:
            st.write(f"{keyword[0]}: {keyword[1]:.4f}")

        plot_keywords(keywords_multilingual, "Multilingual Model Keyword Extraction Results")
else:
    st.write("Please enter an article URL to extract keywords.")