Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -7,6 +7,16 @@ from dotenv import load_dotenv
|
|
7 |
from flask import Flask, render_template, request
|
8 |
import logging
|
9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
if os.getenv("HUGGINGFACE_HUB_CACHE") is None:
|
11 |
load_dotenv()
|
12 |
|
@@ -34,23 +44,23 @@ def index():
|
|
34 |
sentiment_count = None
|
35 |
|
36 |
current_directory = os.getcwd()
|
37 |
-
|
38 |
|
39 |
if request.method == "POST":
|
40 |
url = request.form["url"]
|
41 |
if url:
|
42 |
-
|
43 |
video_details = clustering.get_youtube_video_details(url, api_key)
|
44 |
comments_df = clustering.get_youtube_comments(api_key, url)
|
45 |
-
|
46 |
comments_df = clustering.add_normalized_embeddings_to_dataframe(
|
47 |
comments_df, "comment"
|
48 |
)
|
49 |
-
|
50 |
comments_df["published_at"] = pd.to_datetime(
|
51 |
comments_df["published_at"]
|
52 |
).dt.date
|
53 |
-
|
54 |
comments_df = clustering.classify_sentiment_df(comments_df)
|
55 |
comments_df.to_pickle(
|
56 |
"./data/Comentarios-Youtube/comments_df.pkl"
|
@@ -66,14 +76,14 @@ def index():
|
|
66 |
umap_df, min_eps, max_eps = clustering.transform_embeddings(
|
67 |
comments_df, embeddings_col="embeddings"
|
68 |
)
|
69 |
-
|
70 |
image_path = os.path.join("static", "wordcloud.png")
|
71 |
clustering.plot_wordcloud(comments_df, text_column="comment", output_filename=image_path)
|
72 |
|
73 |
total = comments_df.shape[0]
|
74 |
-
|
75 |
min_items_by_cluster = clustering.determine_min_items_by_cluster(total)
|
76 |
-
|
77 |
(
|
78 |
cluster_assignments,
|
79 |
cluster_counts,
|
@@ -86,7 +96,7 @@ def index():
|
|
86 |
threshold_values=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
|
87 |
embeddings_col="embeddings"
|
88 |
)
|
89 |
-
|
90 |
labels, source, target, values, comments = clustering.build_sankey_data(
|
91 |
cluster_assignments,
|
92 |
cluster_counts,
|
|
|
7 |
from flask import Flask, render_template, request
|
8 |
import logging
|
9 |
|
10 |
+
logging.basicConfig(
|
11 |
+
level=logging.DEBUG,
|
12 |
+
format='%(asctime)s - %(levelname)s - %(message)s',
|
13 |
+
datefmt='%Y-%m-%d %H:%M:%S'
|
14 |
+
)
|
15 |
+
|
16 |
+
def log_message(message):
|
17 |
+
""""""
|
18 |
+
logging.info(message)
|
19 |
+
|
20 |
if os.getenv("HUGGINGFACE_HUB_CACHE") is None:
|
21 |
load_dotenv()
|
22 |
|
|
|
44 |
sentiment_count = None
|
45 |
|
46 |
current_directory = os.getcwd()
|
47 |
+
log_message("Iniciando procesamiento...")
|
48 |
|
49 |
if request.method == "POST":
|
50 |
url = request.form["url"]
|
51 |
if url:
|
52 |
+
log_message("Obteniendo datos de Youtube")
|
53 |
video_details = clustering.get_youtube_video_details(url, api_key)
|
54 |
comments_df = clustering.get_youtube_comments(api_key, url)
|
55 |
+
log_message("Generando embeddings")
|
56 |
comments_df = clustering.add_normalized_embeddings_to_dataframe(
|
57 |
comments_df, "comment"
|
58 |
)
|
59 |
+
log_message("Procesamiento de los datos")
|
60 |
comments_df["published_at"] = pd.to_datetime(
|
61 |
comments_df["published_at"]
|
62 |
).dt.date
|
63 |
+
log_message("Clasificación de los sentimientos")
|
64 |
comments_df = clustering.classify_sentiment_df(comments_df)
|
65 |
comments_df.to_pickle(
|
66 |
"./data/Comentarios-Youtube/comments_df.pkl"
|
|
|
76 |
umap_df, min_eps, max_eps = clustering.transform_embeddings(
|
77 |
comments_df, embeddings_col="embeddings"
|
78 |
)
|
79 |
+
log_message("Generación de wordcloud")
|
80 |
image_path = os.path.join("static", "wordcloud.png")
|
81 |
clustering.plot_wordcloud(comments_df, text_column="comment", output_filename=image_path)
|
82 |
|
83 |
total = comments_df.shape[0]
|
84 |
+
|
85 |
min_items_by_cluster = clustering.determine_min_items_by_cluster(total)
|
86 |
+
log_message("Modelado y generación de métricas")
|
87 |
(
|
88 |
cluster_assignments,
|
89 |
cluster_counts,
|
|
|
96 |
threshold_values=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
|
97 |
embeddings_col="embeddings"
|
98 |
)
|
99 |
+
log_message("Creación de gráfico de Sankey")
|
100 |
labels, source, target, values, comments = clustering.build_sankey_data(
|
101 |
cluster_assignments,
|
102 |
cluster_counts,
|