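# Streamlit app that summarizes Korean news articles with KoBART.
# Run with `streamlit run app.py` (the filename is an assumption; use this file's actual name).
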
import streamlit as st
from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast
import requests
from bs4 import BeautifulSoup

# Load the KoBART summarization model and tokenizer once,
# caching them so Streamlit reruns do not reload the weights
@st.cache_resource
def load_model():
    model = BartForConditionalGeneration.from_pretrained("digit82/kobart-summarization")
    tokenizer = PreTrainedTokenizerFast.from_pretrained("digit82/kobart-summarization")
    return model, tokenizer

model, tokenizer = load_model()

# ๋‰ด์Šค ๋ณธ๋ฌธ ์ถ”์ถœ ํ•จ์ˆ˜
def get_article_text(naver_url):
    headers = {"User-Agent": "Mozilla/5.0"}
    response = requests.get(naver_url, headers=headers)
    soup = BeautifulSoup(response.text, "html.parser")
    article_body = soup.select_one("article") or soup.select_one("#dic_area")
    return article_body.get_text(strip=True) if article_body else None

# ๋‰ด์Šค ์š”์•ฝ ํ•จ์ˆ˜
def summarize_korean_news(text):
    input_ids = tokenizer.encode(text, return_tensors="pt", max_length=1024, truncation=True)
    summary_ids = model.generate(input_ids, max_length=128, min_length=30, num_beams=4, early_stopping=True)
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)

# Streamlit app UI
st.title("HL Mando News Summarizer")
st.write("Summarizes the latest news about HL Mando!")

# Read the news URL from the user
url = st.text_input("Enter a news URL:")

if url:
    article = get_article_text(url)
    if article:
        # Summarization can take a few seconds, so show a spinner meanwhile
        with st.spinner("Summarizing..."):
            summary = summarize_korean_news(article)
        st.subheader("News Summary")
        st.write(summary)
    else:
        st.error("Could not retrieve the article body.")