V1.5
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +75 -0
- app/app.py +0 -0
- app/run_app.py +64 -0
- app/utils/__init__.py +1 -0
- app/utils/file_utils.py +127 -0
- app/utils/image_processing.py +511 -0
- app/utils/model_loader.py +244 -0
- app/utils/onnx_processing.py +527 -0
- app/utils/ui_components.py +137 -0
- game/dev_tools.py +580 -0
- game/essence_generator.py +0 -0
- game/game.py +0 -0
- game/game_constants.py +205 -0
- game/library_system.py +2010 -0
- game/mosaics/templates/1st_costume_template.png +3 -0
- game/mosaics/templates/animal_crossing_template.png +3 -0
- game/mosaics/templates/arknights_template.png +3 -0
- game/mosaics/templates/azur_lane_template.png +3 -0
- game/mosaics/templates/blue_archive_template.png +3 -0
- game/mosaics/templates/boku_no_hero_academia_template.png +3 -0
- game/mosaics/templates/casual_template.png +3 -0
- game/mosaics/templates/chainsaw_man_template.png +3 -0
- game/mosaics/templates/character_extended_template.png +3 -0
- game/mosaics/templates/company_template.png +3 -0
- game/mosaics/templates/cosplay_template.png +3 -0
- game/mosaics/templates/disgaea_template.png +3 -0
- game/mosaics/templates/disney_template.png +3 -0
- game/mosaics/templates/dragon_ball_template.png +3 -0
- game/mosaics/templates/dungeon_and_fighter_template.png +3 -0
- game/mosaics/templates/elsword_template.png +3 -0
- game/mosaics/templates/emblem_template.png +3 -0
- game/mosaics/templates/ensemble_stars!_template.png +3 -0
- game/mosaics/templates/fate_template.png +3 -0
- game/mosaics/templates/ff14_template.png +3 -0
- game/mosaics/templates/fire_emblem_template.png +3 -0
- game/mosaics/templates/flower_template.png +3 -0
- game/mosaics/templates/food_template.png +3 -0
- game/mosaics/templates/genshin_impact_template.png +3 -0
- game/mosaics/templates/girls'_frontline_template.png +3 -0
- game/mosaics/templates/girls_und_panzer_template.png +3 -0
- game/mosaics/templates/granblue_fantasy_template.png +3 -0
- game/mosaics/templates/honkai_impact_template.png +3 -0
- game/mosaics/templates/honkai_star_rail_template.png +3 -0
- game/mosaics/templates/housamo_template.png +3 -0
- game/mosaics/templates/idolmaster_template.png +3 -0
- game/mosaics/templates/jojo_template.png +3 -0
- game/mosaics/templates/kancolle_template.png +3 -0
- game/mosaics/templates/kemono_friends_template.png +3 -0
- game/mosaics/templates/kirby_template.png +3 -0
- game/mosaics/templates/league_of_legends_template.png +3 -0
.gitattributes
CHANGED
@@ -38,3 +38,78 @@ images/training_monitor_predictions.png filter=lfs diff=lfs merge=lfs -text
|
|
38 |
images/training_monitor_selection.png filter=lfs diff=lfs merge=lfs -text
|
39 |
images/app_screenshot.png filter=lfs diff=lfs merge=lfs -text
|
40 |
images/tag_results_example.png filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
images/training_monitor_selection.png filter=lfs diff=lfs merge=lfs -text
|
39 |
images/app_screenshot.png filter=lfs diff=lfs merge=lfs -text
|
40 |
images/tag_results_example.png filter=lfs diff=lfs merge=lfs -text
|
41 |
+
game/mosaics/templates/1st_costume_template.png filter=lfs diff=lfs merge=lfs -text
|
42 |
+
game/mosaics/templates/animal_crossing_template.png filter=lfs diff=lfs merge=lfs -text
|
43 |
+
game/mosaics/templates/arknights_template.png filter=lfs diff=lfs merge=lfs -text
|
44 |
+
game/mosaics/templates/azur_lane_template.png filter=lfs diff=lfs merge=lfs -text
|
45 |
+
game/mosaics/templates/blue_archive_template.png filter=lfs diff=lfs merge=lfs -text
|
46 |
+
game/mosaics/templates/boku_no_hero_academia_template.png filter=lfs diff=lfs merge=lfs -text
|
47 |
+
game/mosaics/templates/casual_template.png filter=lfs diff=lfs merge=lfs -text
|
48 |
+
game/mosaics/templates/chainsaw_man_template.png filter=lfs diff=lfs merge=lfs -text
|
49 |
+
game/mosaics/templates/character_extended_template.png filter=lfs diff=lfs merge=lfs -text
|
50 |
+
game/mosaics/templates/company_template.png filter=lfs diff=lfs merge=lfs -text
|
51 |
+
game/mosaics/templates/cosplay_template.png filter=lfs diff=lfs merge=lfs -text
|
52 |
+
game/mosaics/templates/disgaea_template.png filter=lfs diff=lfs merge=lfs -text
|
53 |
+
game/mosaics/templates/disney_template.png filter=lfs diff=lfs merge=lfs -text
|
54 |
+
game/mosaics/templates/dragon_ball_template.png filter=lfs diff=lfs merge=lfs -text
|
55 |
+
game/mosaics/templates/dungeon_and_fighter_template.png filter=lfs diff=lfs merge=lfs -text
|
56 |
+
game/mosaics/templates/elsword_template.png filter=lfs diff=lfs merge=lfs -text
|
57 |
+
game/mosaics/templates/emblem_template.png filter=lfs diff=lfs merge=lfs -text
|
58 |
+
game/mosaics/templates/ensemble_stars!_template.png filter=lfs diff=lfs merge=lfs -text
|
59 |
+
game/mosaics/templates/fate_template.png filter=lfs diff=lfs merge=lfs -text
|
60 |
+
game/mosaics/templates/ff14_template.png filter=lfs diff=lfs merge=lfs -text
|
61 |
+
game/mosaics/templates/fire_emblem_template.png filter=lfs diff=lfs merge=lfs -text
|
62 |
+
game/mosaics/templates/flower_template.png filter=lfs diff=lfs merge=lfs -text
|
63 |
+
game/mosaics/templates/food_template.png filter=lfs diff=lfs merge=lfs -text
|
64 |
+
game/mosaics/templates/genshin_impact_template.png filter=lfs diff=lfs merge=lfs -text
|
65 |
+
game/mosaics/templates/girls_und_panzer_template.png filter=lfs diff=lfs merge=lfs -text
|
66 |
+
game/mosaics/templates/girls'_frontline_template.png filter=lfs diff=lfs merge=lfs -text
|
67 |
+
game/mosaics/templates/granblue_fantasy_template.png filter=lfs diff=lfs merge=lfs -text
|
68 |
+
game/mosaics/templates/honkai_impact_template.png filter=lfs diff=lfs merge=lfs -text
|
69 |
+
game/mosaics/templates/honkai_star_rail_template.png filter=lfs diff=lfs merge=lfs -text
|
70 |
+
game/mosaics/templates/housamo_template.png filter=lfs diff=lfs merge=lfs -text
|
71 |
+
game/mosaics/templates/idolmaster_template.png filter=lfs diff=lfs merge=lfs -text
|
72 |
+
game/mosaics/templates/jojo_template.png filter=lfs diff=lfs merge=lfs -text
|
73 |
+
game/mosaics/templates/kancolle_template.png filter=lfs diff=lfs merge=lfs -text
|
74 |
+
game/mosaics/templates/kemono_friends_template.png filter=lfs diff=lfs merge=lfs -text
|
75 |
+
game/mosaics/templates/kirby_template.png filter=lfs diff=lfs merge=lfs -text
|
76 |
+
game/mosaics/templates/league_of_legends_template.png filter=lfs diff=lfs merge=lfs -text
|
77 |
+
game/mosaics/templates/love_live!_template.png filter=lfs diff=lfs merge=lfs -text
|
78 |
+
game/mosaics/templates/madoka_magica_template.png filter=lfs diff=lfs merge=lfs -text
|
79 |
+
game/mosaics/templates/main_template.gif filter=lfs diff=lfs merge=lfs -text
|
80 |
+
game/mosaics/templates/manga_template.png filter=lfs diff=lfs merge=lfs -text
|
81 |
+
game/mosaics/templates/mega_man_template.png filter=lfs diff=lfs merge=lfs -text
|
82 |
+
game/mosaics/templates/meme_template.png filter=lfs diff=lfs merge=lfs -text
|
83 |
+
game/mosaics/templates/monster_girl_encyclopedia_template.png filter=lfs diff=lfs merge=lfs -text
|
84 |
+
game/mosaics/templates/naruto_template.png filter=lfs diff=lfs merge=lfs -text
|
85 |
+
game/mosaics/templates/new_year_template.png filter=lfs diff=lfs merge=lfs -text
|
86 |
+
game/mosaics/templates/nijisanji_template.png filter=lfs diff=lfs merge=lfs -text
|
87 |
+
game/mosaics/templates/nikke_template.png filter=lfs diff=lfs merge=lfs -text
|
88 |
+
game/mosaics/templates/omori_template.png filter=lfs diff=lfs merge=lfs -text
|
89 |
+
game/mosaics/templates/pokemon_template.png filter=lfs diff=lfs merge=lfs -text
|
90 |
+
game/mosaics/templates/precure_template.png filter=lfs diff=lfs merge=lfs -text
|
91 |
+
game/mosaics/templates/princess_connect!_template.png filter=lfs diff=lfs merge=lfs -text
|
92 |
+
game/mosaics/templates/punishing_gray_raven_template.png filter=lfs diff=lfs merge=lfs -text
|
93 |
+
game/mosaics/templates/ragnarok_online_template.png filter=lfs diff=lfs merge=lfs -text
|
94 |
+
game/mosaics/templates/sailor_moon_template.png filter=lfs diff=lfs merge=lfs -text
|
95 |
+
game/mosaics/templates/sao_template.png filter=lfs diff=lfs merge=lfs -text
|
96 |
+
game/mosaics/templates/sekaiju_template.png filter=lfs diff=lfs merge=lfs -text
|
97 |
+
game/mosaics/templates/senran_kagura_template.png filter=lfs diff=lfs merge=lfs -text
|
98 |
+
game/mosaics/templates/series_template.png filter=lfs diff=lfs merge=lfs -text
|
99 |
+
game/mosaics/templates/show_by_rock!!_template.png filter=lfs diff=lfs merge=lfs -text
|
100 |
+
game/mosaics/templates/skullgirls_template.png filter=lfs diff=lfs merge=lfs -text
|
101 |
+
game/mosaics/templates/sousou_no_frierem_template.png filter=lfs diff=lfs merge=lfs -text
|
102 |
+
game/mosaics/templates/splatoon_template.png filter=lfs diff=lfs merge=lfs -text
|
103 |
+
game/mosaics/templates/stand_template.png filter=lfs diff=lfs merge=lfs -text
|
104 |
+
game/mosaics/templates/street_fighter_template.png filter=lfs diff=lfs merge=lfs -text
|
105 |
+
game/mosaics/templates/style_template.png filter=lfs diff=lfs merge=lfs -text
|
106 |
+
game/mosaics/templates/symbol_template.png filter=lfs diff=lfs merge=lfs -text
|
107 |
+
game/mosaics/templates/tarot_template.png filter=lfs diff=lfs merge=lfs -text
|
108 |
+
game/mosaics/templates/tf2_template.png filter=lfs diff=lfs merge=lfs -text
|
109 |
+
game/mosaics/templates/umamusume_template.png filter=lfs diff=lfs merge=lfs -text
|
110 |
+
game/mosaics/templates/vocaloid_template.png filter=lfs diff=lfs merge=lfs -text
|
111 |
+
game/mosaics/templates/vtuber_template.png filter=lfs diff=lfs merge=lfs -text
|
112 |
+
game/mosaics/templates/warship_girls_r_template.png filter=lfs diff=lfs merge=lfs -text
|
113 |
+
game/mosaics/templates/weapon_template.png filter=lfs diff=lfs merge=lfs -text
|
114 |
+
game/mosaics/templates/wuthering_wave_template.png filter=lfs diff=lfs merge=lfs -text
|
115 |
+
game/mosaics/templates/xenoblade_template.png filter=lfs diff=lfs merge=lfs -text
|
app/app.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
app/run_app.py
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Launcher script for the Image Tagger application.
|
4 |
+
"""
|
5 |
+
import os
|
6 |
+
import sys
|
7 |
+
import subprocess
|
8 |
+
import webbrowser
|
9 |
+
import time
|
10 |
+
from pathlib import Path
|
11 |
+
|
12 |
+
def run_app():
    """Launch the Streamlit web interface for the Image Tagger.

    Locates ``app.py`` next to this script and the ``streamlit``
    executable inside the project's virtual environment (expected one
    directory above the app folder), then runs the app as a subprocess.

    Returns:
        bool: True if the app ran to completion (or was stopped by the
        user with Ctrl+C), False if a required file was missing or
        Streamlit exited with a non-zero status.
    """
    # Resolve app.py relative to this script so the launcher works no
    # matter which directory it is invoked from (previously a bare
    # relative "app.py", which broke when cwd != the app directory).
    script_dir = os.path.dirname(os.path.abspath(__file__))
    app_path = os.path.join(script_dir, "app.py")
    if not os.path.exists(app_path):
        print(f"Error: {app_path} not found")
        return False

    # The virtual environment lives in the parent directory of the app.
    parent_dir = os.path.dirname(script_dir)

    # Streamlit's executable location differs between Windows and POSIX.
    if sys.platform == "win32":
        streamlit_path = os.path.join(parent_dir, "venv", "Scripts", "streamlit.exe")
    else:
        streamlit_path = os.path.join(parent_dir, "venv", "bin", "streamlit")

    if not os.path.exists(streamlit_path):
        print(f"Error: Streamlit not found at {streamlit_path}")
        print("Make sure you've run setup.py first to create the virtual environment")
        return False

    print("=" * 60)
    print(" Starting Image Tagger Application")
    print("=" * 60)

    print("\nLaunching the web interface...")

    # Create a directory for example images if it doesn't exist
    examples_dir = os.path.join(script_dir, "examples")
    os.makedirs(examples_dir, exist_ok=True)

    # Nudge the user to add sample images when none are present.
    example_files = [f for f in os.listdir(examples_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
    if not example_files:
        print("\nTip: Add some example images to the 'examples' directory for testing")

    # Run Streamlit app - relying on streamlit's built-in browser opening
    # avoids opening the browser twice.
    try:
        subprocess.run([streamlit_path, "run", app_path], check=True)
        return True
    except subprocess.CalledProcessError as e:
        print(f"Error running the app: {e}")
        return False
    except KeyboardInterrupt:
        # Ctrl+C while the server is running is a normal shutdown.
        print("\nApplication stopped by user")
        return True
|
61 |
+
|
62 |
+
if __name__ == "__main__":
    # Exit code 0 on success, 1 on failure, for shell scripting.
    sys.exit(0 if run_app() else 1)
|
app/utils/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
# Make utils a proper Python package
|
app/utils/file_utils.py
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
File utilities for Image Tagger application.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import os
|
6 |
+
import time
|
7 |
+
|
8 |
+
def save_tags_to_file(image_path, all_tags, original_filename=None, custom_dir=None, overwrite=False):
    """
    Write a list of tags to a ``.txt`` file.

    Tags are written comma-separated with a trailing comma after the
    final tag. The file is named after the image (or the original upload
    name) and placed in *custom_dir* when it is an existing directory,
    otherwise in a ``saved_tags`` folder under the app root.

    Args:
        image_path: Path to the original image.
        all_tags: List of all tags to save.
        original_filename: Original filename if uploaded through Streamlit.
        custom_dir: Custom directory to save tags to (if None or not a
            directory, uses the 'saved_tags' folder).
        overwrite: When False, an existing file is kept and a
            timestamped sibling file is written instead.

    Returns:
        Path to the saved file.
    """
    # Pick the target directory: the caller's, or the app-level default.
    if custom_dir and os.path.isdir(custom_dir):
        target_dir = custom_dir
    else:
        app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        target_dir = os.path.join(app_dir, "saved_tags")
    os.makedirs(target_dir, exist_ok=True)

    # Uploaded files keep their original name; otherwise derive the
    # name from the image path itself.
    source_name = original_filename if original_filename else os.path.basename(image_path)
    stem = os.path.splitext(source_name)[0]

    output_path = os.path.join(target_dir, f"{stem}.txt")

    # Avoid clobbering an existing file unless explicitly allowed.
    if not overwrite and os.path.exists(output_path):
        stamp = time.strftime("%Y%m%d-%H%M%S")
        output_path = os.path.join(target_dir, f"{stem}_{stamp}.txt")

    with open(output_path, 'w', encoding='utf-8') as handle:
        if all_tags:
            # The trailing comma after the last tag is intentional.
            handle.write(", ".join(all_tags) + ",")

    return output_path
|
56 |
+
|
57 |
+
def get_default_save_locations():
    """
    Build the list of default directories offered for saving tag files.

    Includes the app-level ``saved_tags`` folder plus the user's
    Desktop, Downloads and Documents directories.

    Returns:
        List of default save locations.
    """
    app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    locations = [
        os.path.join(app_dir, "saved_tags"),
        os.path.expanduser("~/Desktop"),
        os.path.expanduser("~/Downloads"),
        os.path.expanduser("~/Documents"),
    ]

    # NOTE: creating the directories here is a side effect callers rely on.
    for directory in locations:
        os.makedirs(directory, exist_ok=True)

    return locations
|
86 |
+
|
87 |
+
def apply_category_limits(result, category_limits):
    """
    Trim a tagging result according to per-category tag limits.

    Args:
        result: Result dictionary containing tags and all_tags.
        category_limits: Dictionary mapping categories to their tag limits
            (0 = exclude category, -1 = no limit/include all).

    Returns:
        Updated result dictionary with limits applied.
    """
    # Nothing to do without limits or for failed results.
    if not category_limits or not result['success']:
        return result

    tags_by_category = result['tags']

    # Iterate over a snapshot since categories may be deleted mid-loop.
    for category in list(tags_by_category):
        limit = category_limits.get(category, -1)
        if limit == 0:
            # A zero limit removes the category outright.
            del tags_by_category[category]
        elif 0 < limit < len(tags_by_category[category]):
            # Keep only the top-N (already probability-sorted) tags.
            tags_by_category[category] = tags_by_category[category][:limit]

    # Rebuild the flat tag-name list from whatever survived.
    result['tags'] = tags_by_category
    result['all_tags'] = [tag for cat_tags in tags_by_category.values() for tag, _ in cat_tags]

    return result
|
app/utils/image_processing.py
ADDED
@@ -0,0 +1,511 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Image processing functions for the Image Tagger application.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import os
|
6 |
+
import traceback
|
7 |
+
import glob
|
8 |
+
|
9 |
+
|
10 |
+
def process_image(image_path, model, thresholds, metadata, threshold_profile, active_threshold, active_category_thresholds, min_confidence=0.1):
    """
    Run the tagger model on a single image and organize its predictions.

    Args:
        image_path: Path to the image.
        model: The image tagger model.
        thresholds: Thresholds dictionary.
        metadata: Metadata dictionary.
        threshold_profile: Selected threshold profile.
        active_threshold: Overall threshold value.
        active_category_thresholds: Category-specific thresholds.
        min_confidence: Minimum confidence to include in results.

    Returns:
        Dictionary with tags, all probabilities, and other info.
    """
    try:
        # Profiles with per-category thresholds pass them through to the
        # model; every other profile uses a single overall threshold.
        if threshold_profile in ["Category-specific", "High Precision", "High Recall"]:
            results = model.predict(image_path=image_path, category_thresholds=active_category_thresholds)
        else:
            results = model.predict(image_path=image_path, threshold=active_threshold)

        # Group every prediction above min_confidence by its category.
        probs = results['refined_probabilities'][0]  # drop the batch dimension
        all_probs = {}
        for idx in range(len(probs)):
            confidence = probs[idx].item()
            if confidence < min_confidence:
                continue
            tag, category = model.dataset.get_tag_info(idx)
            all_probs.setdefault(category, []).append((tag, confidence))

        # Highest-confidence tags first within each category.
        for category in all_probs:
            all_probs[category].sort(key=lambda pair: pair[1], reverse=True)

        # Keep only tags at or above the active (possibly per-category) cutoff.
        tags = {}
        for category, cat_tags in all_probs.items():
            if active_category_thresholds:
                cutoff = active_category_thresholds.get(category, active_threshold)
            else:
                cutoff = active_threshold
            tags[category] = [entry for entry in cat_tags if entry[1] >= cutoff]

        # Flatten into a single ordered list of tag names.
        all_tags = [tag for cat_tags in tags.values() for tag, _ in cat_tags]

        return {
            'tags': tags,
            'all_probs': all_probs,
            'all_tags': all_tags,
            'success': True
        }

    except Exception as e:
        print(f"Error processing {image_path}: {str(e)}")
        traceback.print_exc()
        return {
            'tags': {},
            'all_probs': {},
            'all_tags': [],
            'success': False,
            'error': str(e)
        }
|
91 |
+
|
92 |
+
def apply_category_limits(result, category_limits):
    """
    Apply per-category tag limits to a tagging result.

    Args:
        result: Result dictionary containing tags and all_tags.
        category_limits: Dictionary mapping categories to their tag limits
            (0 = exclude category, -1 = no limit/include all).

    Returns:
        Updated result dictionary with limits applied.
    """
    # No limits, or a failed result: pass through unchanged.
    if not category_limits or not result['success']:
        return result

    limited = result['tags']

    # Snapshot the keys because zero-limit categories get deleted.
    for category in list(limited.keys()):
        cap = category_limits.get(category, -1)
        if cap == 0:
            # Explicit exclusion of the whole category.
            del limited[category]
        elif cap > 0 and len(limited[category]) > cap:
            # Truncate to the top-N tags (list is already sorted by score).
            limited[category] = limited[category][:cap]

    # Recompute the flattened tag list from the surviving categories.
    flattened = []
    for cat_tags in limited.values():
        flattened.extend(tag for tag, _ in cat_tags)

    result['tags'] = limited
    result['all_tags'] = flattened
    return result
|
133 |
+
|
134 |
+
def batch_process_images(folder_path, model, thresholds, metadata, threshold_profile, active_threshold,
                        active_category_thresholds, save_dir=None, progress_callback=None,
                        min_confidence=0.1, batch_size=1, category_limits=None):
    """
    Process all images in a folder with optional batching for improved performance.

    Finds *.jpg / *.jpeg / *.png files in folder_path, tags each one via
    process_image (batch_size == 1) or process_image_batch (batch_size > 1,
    falling back to per-image processing on batch errors), applies optional
    per-category limits, and writes each image's tags to a text file.

    Args:
        folder_path: Path to folder containing images
        model: The image tagger model
        thresholds: Thresholds dictionary
        metadata: Metadata dictionary
        threshold_profile: Selected threshold profile
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        save_dir: Directory to save tag files (if None uses default)
        progress_callback: Optional callback called as (done, total, current_path);
            current_path is None on the final call
        min_confidence: Minimum confidence threshold
        batch_size: Number of images to process at once (default: 1)
        category_limits: Dictionary mapping categories to their tag limits
            (0 = exclude category, -1 = no limit; see apply_category_limits)

    Returns:
        Dictionary with results for each image
    """
    from .file_utils import save_tags_to_file  # Import here to avoid circular imports
    # NOTE(review): torch and Image are imported but not referenced in this
    # function — possibly needed by callees; confirm before removing.
    import torch
    from PIL import Image
    import time

    print(f"Starting batch processing on {folder_path} with batch size {batch_size}")
    start_time = time.time()

    # Find all image files in the folder (both lower- and upper-case extensions)
    image_extensions = ['*.jpg', '*.jpeg', '*.png']
    image_files = []

    for ext in image_extensions:
        image_files.extend(glob.glob(os.path.join(folder_path, ext)))
        image_files.extend(glob.glob(os.path.join(folder_path, ext.upper())))

    # Use a set to remove duplicate files (Windows filesystems are case-insensitive,
    # so the lower/upper glob pair above can match the same file twice)
    if os.name == 'nt':  # Windows
        # Use lowercase paths for comparison on Windows
        unique_paths = set()
        unique_files = []
        for file_path in image_files:
            normalized_path = os.path.normpath(file_path).lower()
            if normalized_path not in unique_paths:
                unique_paths.add(normalized_path)
                unique_files.append(file_path)
        image_files = unique_files

    # Sort files for consistent processing order
    image_files.sort()

    if not image_files:
        return {
            'success': False,
            'error': f"No images found in {folder_path}",
            'results': {}
        }

    print(f"Found {len(image_files)} images to process")

    # Use the provided save directory or create a default one
    if save_dir is None:
        app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        save_dir = os.path.join(app_dir, "saved_tags")

    # Ensure the directory exists
    os.makedirs(save_dir, exist_ok=True)

    # Process images in batches
    results = {}
    total_images = len(image_files)
    processed = 0

    # Process in batches
    for i in range(0, total_images, batch_size):
        batch_start = time.time()
        # Get current batch of images (the final batch may be smaller)
        batch_files = image_files[i:i+batch_size]
        batch_size_actual = len(batch_files)

        print(f"Processing batch {i//batch_size + 1}/{(total_images + batch_size - 1)//batch_size}: {batch_size_actual} images")

        if batch_size > 1:
            # True batch processing for multiple images at once
            try:
                # Using batch processing if batch_size > 1
                batch_results = process_image_batch(
                    image_paths=batch_files,
                    model=model,
                    thresholds=thresholds,
                    metadata=metadata,
                    threshold_profile=threshold_profile,
                    active_threshold=active_threshold,
                    active_category_thresholds=active_category_thresholds,
                    min_confidence=min_confidence
                )

                # Process and save results for each image in the batch
                for j, image_path in enumerate(batch_files):
                    # Update progress if callback provided
                    if progress_callback:
                        progress_callback(processed + j, total_images, image_path)

                    if j < len(batch_results):
                        result = batch_results[j]

                        # Apply category limits if specified
                        if category_limits and result['success']:
                            # Use the apply_category_limits function instead of the inline code
                            result = apply_category_limits(result, category_limits)

                            # Debug print if you want
                            print(f"Applied limits for {os.path.basename(image_path)}, remaining tags: {len(result['all_tags'])}")

                        # Save the tags to a file
                        if result['success']:
                            output_path = save_tags_to_file(
                                image_path=image_path,
                                all_tags=result['all_tags'],
                                custom_dir=save_dir,
                                overwrite=True
                            )
                            result['output_path'] = str(output_path)

                        # Store the result
                        results[image_path] = result
                    else:
                        # Handle case where batch processing returned fewer results than expected
                        results[image_path] = {
                            'success': False,
                            'error': 'Batch processing error: missing result',
                            'all_tags': []
                        }

            except Exception as e:
                print(f"Batch processing error: {str(e)}")
                traceback.print_exc()

                # Fall back to processing images one by one in this batch
                for j, image_path in enumerate(batch_files):
                    if progress_callback:
                        progress_callback(processed + j, total_images, image_path)

                    result = process_image(
                        image_path=image_path,
                        model=model,
                        thresholds=thresholds,
                        metadata=metadata,
                        threshold_profile=threshold_profile,
                        active_threshold=active_threshold,
                        active_category_thresholds=active_category_thresholds,
                        min_confidence=min_confidence
                    )

                    # Apply category limits if specified
                    if category_limits and result['success']:
                        # Use the apply_category_limits function
                        result = apply_category_limits(result, category_limits)

                    if result['success']:
                        output_path = save_tags_to_file(
                            image_path=image_path,
                            all_tags=result['all_tags'],
                            custom_dir=save_dir,
                            overwrite=True
                        )
                        result['output_path'] = str(output_path)

                    results[image_path] = result
        else:
            # Process one by one if batch_size is 1
            for j, image_path in enumerate(batch_files):
                if progress_callback:
                    progress_callback(processed + j, total_images, image_path)

                result = process_image(
                    image_path=image_path,
                    model=model,
                    thresholds=thresholds,
                    metadata=metadata,
                    threshold_profile=threshold_profile,
                    active_threshold=active_threshold,
                    active_category_thresholds=active_category_thresholds,
                    min_confidence=min_confidence
                )

                # Apply category limits if specified
                if category_limits and result['success']:
                    # Use the apply_category_limits function
                    result = apply_category_limits(result, category_limits)

                if result['success']:
                    output_path = save_tags_to_file(
                        image_path=image_path,
                        all_tags=result['all_tags'],
                        custom_dir=save_dir,
                        overwrite=True
                    )
                    result['output_path'] = str(output_path)

                results[image_path] = result

        # Update processed count
        processed += batch_size_actual

        # Calculate batch timing
        batch_end = time.time()
        batch_time = batch_end - batch_start
        print(f"Batch processed in {batch_time:.2f} seconds ({batch_time/batch_size_actual:.2f} seconds per image)")

    # Final progress update (current path is None to signal completion)
    if progress_callback:
        progress_callback(total_images, total_images, None)

    end_time = time.time()
    total_time = end_time - start_time
    print(f"Batch processing finished. Total time: {total_time:.2f} seconds, Average: {total_time/total_images:.2f} seconds per image")

    return {
        'success': True,
        'total': total_images,
        'processed': len(results),
        'results': results,
        'save_dir': save_dir,
        'time_elapsed': end_time - start_time
    }
|
363 |
+
|
364 |
+
def _postprocess_probs(probs, get_tag_info, active_threshold, active_category_thresholds, min_confidence):
    """Convert one image's (1, num_tags) probability tensor into a result dict.

    Args:
        probs: Tensor of shape (1, num_tags) holding sigmoid probabilities.
        get_tag_info: Callable mapping a tag index to a (tag, category) pair.
        active_threshold: Overall threshold for including a tag in 'tags'.
        active_category_thresholds: Optional dict of per-category thresholds.
        min_confidence: Minimum probability to report in 'all_probs'.

    Returns:
        Dict with 'tags' (thresholded, per category), 'all_probs'
        (everything >= min_confidence, per category), 'all_tags'
        (flat list of accepted tags) and 'success': True.
    """
    # Collect every probability above the reporting floor, grouped by category.
    all_probs = {}
    for idx in range(probs.size(1)):
        prob_value = probs[0, idx].item()
        if prob_value >= min_confidence:
            tag, category = get_tag_info(idx)
            all_probs.setdefault(category, []).append((tag, prob_value))

    # Highest-confidence tags first within each category.
    for category in all_probs:
        all_probs[category].sort(key=lambda x: x[1], reverse=True)

    # Keep only tags clearing the (possibly category-specific) threshold.
    tags = {}
    for category, cat_tags in all_probs.items():
        threshold = (active_category_thresholds.get(category, active_threshold)
                     if active_category_thresholds else active_threshold)
        tags[category] = [(tag, prob) for tag, prob in cat_tags if prob >= threshold]

    all_tags = [tag for cat_tags in tags.values() for tag, _ in cat_tags]

    return {
        'tags': tags,
        'all_probs': all_probs,
        'all_tags': all_tags,
        'success': True
    }


def process_image_batch(image_paths, model, thresholds, metadata, threshold_profile, active_threshold, active_category_thresholds, min_confidence=0.1):
    """
    Process a batch of images at once.

    Args:
        image_paths: List of paths to the images
        model: The image tagger model
        thresholds: Thresholds dictionary
        metadata: Metadata dictionary
        threshold_profile: Selected threshold profile
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        min_confidence: Minimum confidence to include in results

    Returns:
        List of dictionaries with tags, all probabilities, and other info for
        each successfully loaded image. Returns an empty list if no image
        could be loaded or an unexpected error occurs.
    """
    try:
        import torch
        from PIL import Image
        import torchvision.transforms as transforms

        # Identify the model type we're using for better error handling
        model_type = model.__class__.__name__
        print(f"Running batch processing with model type: {model_type}")

        # Prepare the transformation for the images
        transform = transforms.Compose([
            transforms.Resize((512, 512)),  # Adjust based on your model's expected input
            transforms.ToTensor(),
        ])

        # Match the model's device/dtype so inputs don't trigger implicit copies
        device = next(model.parameters()).device
        dtype = next(model.parameters()).dtype
        print(f"Model is using device: {device}, dtype: {dtype}")

        # Load and preprocess all images; unreadable files are skipped
        batch_tensor = []
        valid_images = []
        for img_path in image_paths:
            try:
                img = Image.open(img_path).convert('RGB')
                img_tensor = transform(img).to(device=device, dtype=dtype)
                batch_tensor.append(img_tensor)
                valid_images.append(img_path)
            except Exception as e:
                print(f"Error loading image {img_path}: {str(e)}")

        if not batch_tensor:
            return []

        # Stack all tensors into a single batch
        batch_input = torch.stack(batch_tensor)

        with torch.no_grad():
            try:
                # Forward pass on the whole batch
                output = model(batch_input)

                # Some models return (initial_logits, refined_logits);
                # use the refined ones when present.
                if isinstance(output, tuple):
                    probs_batch = torch.sigmoid(output[1])
                else:
                    probs_batch = torch.sigmoid(output)

                return [
                    _postprocess_probs(
                        probs_batch[i].unsqueeze(0),
                        model.dataset.get_tag_info,
                        active_threshold,
                        active_category_thresholds,
                        min_confidence
                    )
                    for i in range(len(valid_images))
                ]

            except RuntimeError as e:
                # CUDA OOM (or similar) on the full batch: retry one by one
                print(f"Error in batch processing: {str(e)}")
                print("Falling back to one-by-one processing...")

                results = []
                for img_tensor, img_path in zip(batch_tensor, valid_images):
                    try:
                        output = model(img_tensor.unsqueeze(0))
                        if isinstance(output, tuple):
                            probs = torch.sigmoid(output[1])
                        else:
                            probs = torch.sigmoid(output)

                        # BUGFIX: the previous fallback left a placeholder here
                        # ("[Code omitted for brevity]") and never appended
                        # successful results; reuse the batched path's
                        # post-processing so both paths agree.
                        results.append(_postprocess_probs(
                            probs,
                            model.dataset.get_tag_info,
                            active_threshold,
                            active_category_thresholds,
                            min_confidence
                        ))
                    except Exception as e:
                        print(f"Error processing image {img_path}: {str(e)}")
                        results.append({
                            'tags': {},
                            'all_probs': {},
                            'all_tags': [],
                            'success': False,
                            'error': str(e)
                        })

                return results

    except Exception as e:
        print(f"Error in batch processing: {str(e)}")
        import traceback
        traceback.print_exc()
        # Honor the documented list contract even on unexpected failure
        # (previously this path implicitly returned None).
        return []
|
app/utils/model_loader.py
ADDED
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Model loading utilities for Image Tagger application.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import os
|
6 |
+
import json
|
7 |
+
import torch
|
8 |
+
import platform
|
9 |
+
import traceback
|
10 |
+
import importlib.util
|
11 |
+
|
12 |
+
|
13 |
+
def is_windows():
    """Return True when the current host platform is Windows."""
    current_platform = platform.system()
    return current_platform == "Windows"
|
16 |
+
|
17 |
+
|
18 |
+
class DummyDataset:
    """Lightweight stand-in for the training dataset, used at inference time.

    Exposes only the tag-lookup surface the tagger models need —
    ``total_tags``, ``idx_to_tag`` and ``tag_to_category`` — populated
    from the exported metadata dictionary.
    """

    def __init__(self, metadata):
        self.total_tags = metadata['total_tags']
        # JSON keys arrive as strings; lookups use integer indices.
        self.idx_to_tag = {
            int(index_key): tag_name
            for index_key, tag_name in metadata['idx_to_tag'].items()
        }
        self.tag_to_category = metadata['tag_to_category']

    def get_tag_info(self, idx):
        """Return ``(tag, category)`` for a model output index.

        Unknown indices map to ``"unknown_<idx>"``; tags without a
        recorded category fall back to ``"general"``.
        """
        tag = self.idx_to_tag.get(idx, f"unknown_{idx}")
        return tag, self.tag_to_category.get(tag, "general")
|
29 |
+
|
30 |
+
|
31 |
+
def load_model_code(model_dir):
    """
    Dynamically import ``model_code.py`` from a model export directory.

    Args:
        model_dir: Path to the model directory

    Returns:
        The imported model code module.

    Raises:
        FileNotFoundError: if ``model_code.py`` is not present.
        ImportError: if the module lacks the required classes.
    """
    code_path = os.path.join(model_dir, "model_code.py")
    if not os.path.exists(code_path):
        raise FileNotFoundError(f"model_code.py not found at {code_path}")

    # Load the file as an ad-hoc module named "model_code".
    spec = importlib.util.spec_from_file_location("model_code", code_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    # The export is only usable when both model classes are defined.
    if not all(hasattr(module, name) for name in ('ImageTagger', 'FlashAttention')):
        raise ImportError("Required classes not found in model_code.py")

    return module
|
56 |
+
|
57 |
+
|
58 |
+
def check_flash_attention():
    """
    Check if Flash Attention is properly installed.

    A genuine install exposes ``flash_attn_func`` from a real extension
    module; exporters may ship a pure-Python stand-in whose module path
    contains ``flash_attn_fallback``, which does not count.

    Returns:
        bool: True if Flash Attention is available and working
    """
    try:
        import flash_attn
        if hasattr(flash_attn, 'flash_attn_func'):
            module_path = flash_attn.flash_attn_func.__module__
            return 'flash_attn_fallback' not in module_path
    except ImportError:
        # Optional dependency: absence simply means "not installed".
        pass
    except Exception:
        # BUGFIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; a broken install still just reports False.
        pass
    return False
|
73 |
+
|
74 |
+
|
75 |
+
def estimate_model_memory_usage(model, device):
    """
    Estimate the memory usage of a model in MB.

    Sums the storage of all parameters and buffers. ``device`` is accepted
    for interface compatibility; the estimate itself is device-independent.
    """
    param_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    buffer_bytes = sum(b.nelement() * b.element_size() for b in model.buffers())
    total_bytes = param_bytes + buffer_bytes
    return total_bytes / (1024 * 1024)  # bytes -> MB
|
83 |
+
|
84 |
+
|
85 |
+
def load_exported_model(model_dir, model_type="full"):
    """
    Load the exported model and metadata with correct precision.

    Reads metadata.json / thresholds.json, dynamically imports the export's
    model_code.py, instantiates the matching model class, loads its weights
    and moves it to the available device in float16.

    Args:
        model_dir: Directory containing the model files
        model_type: "full" or "initial_only"

    Returns:
        model, thresholds, metadata

    Raises:
        FileNotFoundError: if a required model/metadata/threshold file is missing.
        Exception: re-raised if model construction or weight loading fails.
    """
    print(f"Loading {model_type} model from {model_dir}")

    # Make sure we have the absolute path to the model directory
    model_dir = os.path.abspath(model_dir)
    print(f"Absolute model path: {model_dir}")

    # Check for required files
    metadata_path = os.path.join(model_dir, "metadata.json")
    thresholds_path = os.path.join(model_dir, "thresholds.json")

    print(f"Looking for thresholds at: {thresholds_path}")

    # Check platform and Flash Attention status
    windows_system = is_windows()
    flash_attn_installed = check_flash_attention()

    # Add a specific warning for Windows users trying to use the full model without Flash Attention
    if windows_system and model_type == "full" and not flash_attn_installed:
        print("Note: On Windows without Flash Attention, the full model will not work")
        print("      which may produce less accurate results.")
        print("      Consider using the 'initial_only' model for better performance on Windows.")

    # Determine file paths based on model type
    if model_type == "initial_only":
        # Try both naming conventions
        if os.path.exists(os.path.join(model_dir, "model_initial_only.pt")):
            model_path = os.path.join(model_dir, "model_initial_only.pt")
        else:
            model_path = os.path.join(model_dir, "model_initial.pt")

        # Try both naming conventions for info file
        if os.path.exists(os.path.join(model_dir, "model_info_initial_only.json")):
            model_info_path = os.path.join(model_dir, "model_info_initial_only.json")
        else:
            model_info_path = os.path.join(model_dir, "model_info_initial.json")
    else:
        # Try multiple naming conventions for the full model
        model_filenames = ["model_refined.pt", "model.pt", "model_full.pt"]
        model_path = None
        for filename in model_filenames:
            path = os.path.join(model_dir, filename)
            if os.path.exists(path):
                model_path = path
                break

        if model_path is None:
            raise FileNotFoundError(f"No model file found in {model_dir}. Looked for: {', '.join(model_filenames)}")

        model_info_path = os.path.join(model_dir, "model_info.json")

    # Check for required files
    # (metadata_path/thresholds_path recomputed here; same values as above)
    metadata_path = os.path.join(model_dir, "metadata.json")
    thresholds_path = os.path.join(model_dir, "thresholds.json")

    required_files = [metadata_path, thresholds_path, model_path]
    for file_path in required_files:
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"Required file {file_path} not found")

    # Load metadata
    with open(metadata_path, "r") as f:
        metadata = json.load(f)

    # Load model code (dynamic import of model_code.py from the export)
    model_code = load_model_code(model_dir)

    # Create dataset (minimal tag-lookup shim the model classes expect)
    dummy_dataset = DummyDataset(metadata)

    # Determine device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load model info (architecture hyperparameters; falls back to defaults)
    if os.path.exists(model_info_path):
        with open(model_info_path, 'r') as f:
            model_info = json.load(f)
        print("Loaded model info:", model_info)
        tag_context_size = model_info.get('tag_context_size', 256)
        num_heads = model_info.get('num_heads', 16)
    else:
        print("Model info not found, using defaults")
        tag_context_size = 256
        num_heads = 16

    try:
        # Check if InitialOnlyImageTagger class exists
        has_initial_only_class = hasattr(model_code, 'InitialOnlyImageTagger')

        # Create the appropriate model type
        if model_type == "initial_only":
            # Create the lightweight model
            if has_initial_only_class:
                model = model_code.InitialOnlyImageTagger(
                    total_tags=metadata['total_tags'],
                    dataset=dummy_dataset,
                    pretrained=False
                )
            else:
                # Fallback to using ImageTagger for initial-only if the specific class isn't available
                print("InitialOnlyImageTagger class not found. Using ImageTagger as fallback.")
                model = model_code.ImageTagger(
                    total_tags=metadata['total_tags'],
                    dataset=dummy_dataset,
                    pretrained=False,
                    tag_context_size=tag_context_size,
                    num_heads=num_heads
                )
        else:
            # Create the full model
            model = model_code.ImageTagger(
                total_tags=metadata['total_tags'],
                dataset=dummy_dataset,
                pretrained=False,
                tag_context_size=tag_context_size,
                num_heads=num_heads
            )

        # Load state dict
        state_dict = torch.load(model_path, map_location=device)

        # Try loading with strict=True first, then fall back to strict=False
        # (strict=False tolerates missing/unexpected keys across export versions)
        try:
            model.load_state_dict(state_dict, strict=True)
            print("✓ Model loaded with strict=True")
        except Exception as e:
            print(f"Warning: Strict loading failed: {str(e)}")
            print("Attempting to load with strict=False...")
            model.load_state_dict(state_dict, strict=False)
            print("✓ Model loaded with strict=False")

        # Ensure model is in half precision to match training conditions
        model = model.to(device=device, dtype=torch.float16)
        model.eval()

        # Check parameter dtype
        param_dtype = next(model.parameters()).dtype
        print(f"Model loaded successfully on {device} with precision {param_dtype}")
        print(f"Model memory usage: {estimate_model_memory_usage(model, device):.2f} MB")

    except Exception as e:
        print(f"Error loading model: {str(e)}")
        traceback.print_exc()
        raise

    # Load thresholds
    with open(thresholds_path, "r") as f:
        thresholds = json.load(f)

    return model, thresholds, metadata
|
app/utils/onnx_processing.py
ADDED
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
ONNX-based batch image processing for the Image Tagger application.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import os
|
6 |
+
import json
|
7 |
+
import time
|
8 |
+
import traceback
|
9 |
+
import numpy as np
|
10 |
+
import glob
|
11 |
+
import onnxruntime as ort
|
12 |
+
from PIL import Image
|
13 |
+
import torchvision.transforms as transforms
|
14 |
+
from concurrent.futures import ThreadPoolExecutor
|
15 |
+
|
16 |
+
def preprocess_image(image_path, image_size=512):
    """Load an image and letterbox it to a square (3, image_size, image_size) array.

    The image is scaled to fit inside the square while preserving aspect
    ratio, centered on a black canvas, and returned as a CHW float32
    numpy array with values in [0, 1].
    """
    if not os.path.exists(image_path):
        raise ValueError(f"Image not found at path: {image_path}")

    # Initialize transform
    to_tensor = transforms.Compose([
        transforms.ToTensor(),
    ])

    try:
        with Image.open(image_path) as img:
            # Normalize alpha / palette images to plain RGB.
            if img.mode in ('RGBA', 'P'):
                img = img.convert('RGB')

            width, height = img.size
            aspect_ratio = width / height

            # Fit the longer side to image_size; scale the shorter side.
            if aspect_ratio > 1:
                new_width, new_height = image_size, int(image_size / aspect_ratio)
            else:
                new_height, new_width = image_size, int(image_size * aspect_ratio)

            # High-quality downscale with the LANCZOS filter.
            resized = img.resize((new_width, new_height), Image.Resampling.LANCZOS)

            # Center the resized image on a black square canvas.
            canvas = Image.new('RGB', (image_size, image_size), (0, 0, 0))
            offset = ((image_size - new_width) // 2, (image_size - new_height) // 2)
            canvas.paste(resized, offset)

            return to_tensor(canvas).numpy()
    except Exception as e:
        raise Exception(f"Error processing {image_path}: {str(e)}")
|
58 |
+
|
59 |
+
def process_single_image_onnx(image_path, model_path, metadata, threshold_profile="Overall",
                              active_threshold=0.35, active_category_thresholds=None,
                              min_confidence=0.1):
    """
    Process a single image using ONNX model.

    A single ONNXImageTagger is cached on the function object so repeated
    calls reuse the loaded session instead of re-creating it.

    Args:
        image_path: Path to the image file
        model_path: Path to the ONNX model file
        metadata: Model metadata dictionary
        threshold_profile: The threshold profile being used
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        min_confidence: Minimum confidence to include in results

    Returns:
        Dictionary with tags and probabilities (success=False on error).
    """
    import time

    try:
        # Reuse the cached tagger when one exists; otherwise build and cache it.
        try:
            tagger = process_single_image_onnx.tagger
        except AttributeError:
            # Derive the metadata path, trying both naming conventions.
            metadata_path = model_path.replace('.onnx', '_metadata.json')
            if not os.path.exists(metadata_path):
                metadata_path = model_path.replace('.onnx', '') + '_metadata.json'

            tagger = ONNXImageTagger(model_path, metadata_path)
            process_single_image_onnx.tagger = tagger

        # Preprocess, then run single-image inference as a batch of one.
        start_time = time.time()
        img_array = preprocess_image(image_path)
        batch_results = tagger.predict_batch(
            [img_array],
            threshold=active_threshold,
            category_thresholds=active_category_thresholds,
            min_confidence=min_confidence
        )
        inference_time = time.time() - start_time

        if not batch_results:
            return {
                'success': False,
                'error': 'Failed to process image',
                'all_tags': [],
                'all_probs': {},
                'tags': {}
            }

        result = batch_results[0]
        result['inference_time'] = inference_time
        return result

    except Exception as e:
        import traceback
        print(f"Error in process_single_image_onnx: {str(e)}")
        traceback.print_exc()
        return {
            'success': False,
            'error': str(e),
            'all_tags': [],
            'all_probs': {},
            'tags': {}
        }
|
131 |
+
|
132 |
+
def preprocess_images_parallel(image_paths, image_size=512, max_workers=8):
    """
    Preprocess multiple images concurrently with a thread pool.

    Returns:
        A pair ``(arrays, paths)`` containing only the images that were
        preprocessed successfully, in input order.
    """
    def _load(path):
        # Failures are logged and reported as None so one bad file
        # doesn't abort the whole batch.
        try:
            return preprocess_image(path, image_size), path
        except Exception as e:
            print(f"Error processing {path}: {str(e)}")
            return None, path

    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        outcomes = list(pool.map(_load, image_paths))

    # Keep only the successful loads, preserving input order.
    arrays = [arr for arr, _ in outcomes if arr is not None]
    paths = [path for arr, path in outcomes if arr is not None]
    return arrays, paths
|
156 |
+
|
157 |
+
def apply_category_limits(result, category_limits):
    """
    Trim a successful result's tags according to per-category limits.

    Args:
        result: Result dictionary containing ``tags`` and ``all_tags``.
        category_limits: Mapping of category -> max tag count
            (0 = exclude the category entirely, -1 = no limit/include all).

    Returns:
        The same result dictionary, with limits applied in place.
    """
    # Nothing to do for unsuccessful results or when no limits are set.
    if not category_limits or not result['success']:
        return result

    filtered_tags = result['tags']

    # Snapshot the keys so categories can be dropped while iterating.
    for category in list(filtered_tags):
        cap = category_limits.get(category, -1)
        if cap == 0:
            # Zero means the category is excluded outright.
            del filtered_tags[category]
        elif cap > 0:
            # Tags are already sorted by confidence, so keep the top N.
            filtered_tags[category] = filtered_tags[category][:cap]

    # Rebuild the flat tag list from whatever survived the limits.
    result['tags'] = filtered_tags
    result['all_tags'] = [
        tag
        for cat_tags in filtered_tags.values()
        for tag, _ in cat_tags
    ]
    return result
|
198 |
+
|
199 |
+
class ONNXImageTagger:
    """ONNX-based image tagger for fast batch inference.

    Wraps an onnxruntime InferenceSession (CUDA when available, CPU
    otherwise) plus the exported tag metadata, and turns raw logits into
    per-category tag/probability results.
    """

    def __init__(self, model_path, metadata_path):
        """Create a session for the .onnx file and load its tag metadata.

        Falls back to the CPU execution provider if CUDA session creation
        raises for any reason.
        """
        # Load model
        self.model_path = model_path
        try:
            self.session = ort.InferenceSession(
                model_path,
                providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
            )
            print(f"Using providers: {self.session.get_providers()}")
        except Exception as e:
            # CUDA provider unavailable or failed to initialize; retry CPU-only.
            print(f"CUDA not available, using CPU: {e}")
            self.session = ort.InferenceSession(
                model_path,
                providers=['CPUExecutionProvider']
            )
            print(f"Using providers: {self.session.get_providers()}")

        # Load metadata (expects 'idx_to_tag' and 'tag_to_category' mappings)
        with open(metadata_path, 'r') as f:
            self.metadata = json.load(f)

        # Get input name (single-input model assumed — TODO confirm)
        self.input_name = self.session.get_inputs()[0].name
        print(f"Model loaded successfully. Input name: {self.input_name}")

    def predict_batch(self, image_arrays, threshold=0.325, category_thresholds=None, min_confidence=0.1):
        """Run batch inference on preprocessed image arrays.

        Args:
            image_arrays: List of CHW numpy arrays (all the same shape).
            threshold: Overall probability threshold for accepting a tag.
            category_thresholds: Optional per-category threshold overrides.
            min_confidence: Floor for reporting a probability in 'all_probs'.

        Returns:
            List (one entry per input image) of dicts with 'tags',
            'all_probs', 'all_tags' and 'success': True.
        """
        # Stack arrays into batch
        batch_input = np.stack(image_arrays)

        # Run inference
        start_time = time.time()
        outputs = self.session.run(None, {self.input_name: batch_input})
        inference_time = time.time() - start_time
        print(f"Batch inference completed in {inference_time:.4f} seconds ({inference_time/len(image_arrays):.4f} s/image)")

        # Process outputs: the model emits logits; convert to probabilities.
        initial_probs = 1.0 / (1.0 + np.exp(-outputs[0]))  # Apply sigmoid
        # When the model exports two heads, prefer the refined (second) output.
        refined_probs = 1.0 / (1.0 + np.exp(-outputs[1])) if len(outputs) > 1 else initial_probs

        # Apply thresholds and extract tags for each image
        batch_results = []

        for i in range(refined_probs.shape[0]):
            probs = refined_probs[i]

            # Extract and organize all probabilities above min_confidence
            all_probs = {}
            for idx in range(probs.shape[0]):
                prob_value = float(probs[idx])
                if prob_value >= min_confidence:
                    # Metadata keys are JSON strings, so look up by str(idx).
                    idx_str = str(idx)
                    tag_name = self.metadata['idx_to_tag'].get(idx_str, f"unknown-{idx}")
                    category = self.metadata['tag_to_category'].get(tag_name, "general")

                    if category not in all_probs:
                        all_probs[category] = []

                    all_probs[category].append((tag_name, prob_value))

            # Sort tags by probability within each category
            for category in all_probs:
                all_probs[category] = sorted(
                    all_probs[category],
                    key=lambda x: x[1],
                    reverse=True
                )

            # Get the filtered tags based on the selected threshold
            tags = {}
            for category, cat_tags in all_probs.items():
                # Use category-specific threshold if available
                if category_thresholds and category in category_thresholds:
                    cat_threshold = category_thresholds[category]
                else:
                    cat_threshold = threshold

                tags[category] = [(tag, prob) for tag, prob in cat_tags if prob >= cat_threshold]

            # Create a flat list of all tags above threshold
            all_tags = []
            for category, cat_tags in tags.items():
                for tag, _ in cat_tags:
                    all_tags.append(tag)

            batch_results.append({
                'tags': tags,
                'all_probs': all_probs,
                'all_tags': all_tags,
                'success': True
            })

        return batch_results
|
295 |
+
|
296 |
+
def batch_process_images_onnx(folder_path, model_path, metadata_path, threshold_profile,
                              active_threshold, active_category_thresholds, save_dir=None,
                              progress_callback=None, min_confidence=0.1, batch_size=16,
                              category_limits=None):
    """
    Process all images in a folder using the ONNX model.

    Args:
        folder_path: Path to folder containing images
        model_path: Path to the ONNX model file
        metadata_path: Path to the model metadata file
        threshold_profile: Selected threshold profile
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        save_dir: Directory to save tag files (if None uses default)
        progress_callback: Optional callback for progress updates,
            called as progress_callback(done_count, total, current_path_or_None)
        min_confidence: Minimum confidence threshold
        batch_size: Number of images to process at once
        category_limits: Dictionary mapping categories to their tag limits (0 = unlimited)

    Returns:
        Dictionary with overall status plus a per-image 'results' mapping.
    """
    from utils.file_utils import save_tags_to_file  # Import here to avoid circular imports

    # Find all image files in the folder (lower- and upper-case extensions)
    image_extensions = ['*.jpg', '*.jpeg', '*.png']
    image_files = []
    for ext in image_extensions:
        image_files.extend(glob.glob(os.path.join(folder_path, ext)))
        image_files.extend(glob.glob(os.path.join(folder_path, ext.upper())))

    # Windows filesystems are case-insensitive, so the two globs above can
    # return the same file twice -- deduplicate on the normalized lowercase path.
    if os.name == 'nt':
        unique_paths = set()
        unique_files = []
        for file_path in image_files:
            normalized_path = os.path.normpath(file_path).lower()
            if normalized_path not in unique_paths:
                unique_paths.add(normalized_path)
                unique_files.append(file_path)
        image_files = unique_files

    if not image_files:
        return {
            'success': False,
            'error': f"No images found in {folder_path}",
            'results': {}
        }

    # Use the provided save directory or create a default one next to the app
    if save_dir is None:
        app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        save_dir = os.path.join(app_dir, "saved_tags")
    os.makedirs(save_dir, exist_ok=True)

    # Create ONNX tagger
    tagger = ONNXImageTagger(model_path, metadata_path)

    results = {}
    total_images = len(image_files)
    processed = 0
    start_time = time.time()

    # Process in batches
    for i in range(0, total_images, batch_size):
        batch_start = time.time()
        batch_files = image_files[i:i+batch_size]
        batch_size_actual = len(batch_files)

        if progress_callback:
            progress_callback(processed, total_images, batch_files[0] if batch_files else None)

        print(f"Processing batch {i//batch_size + 1}/{(total_images + batch_size - 1)//batch_size}: {batch_size_actual} images")

        try:
            # Preprocess images in parallel; valid_paths holds only the images
            # that survived preprocessing, aligned with processed_images
            processed_images, valid_paths = preprocess_images_parallel(batch_files)

            if processed_images:
                # Run batch prediction
                batch_results = tagger.predict_batch(
                    processed_images,
                    threshold=active_threshold,
                    category_thresholds=active_category_thresholds,
                    min_confidence=min_confidence
                )

                # Process results for each image
                for j, (image_path, result) in enumerate(zip(valid_paths, batch_results)):
                    if progress_callback:
                        progress_callback(processed + j, total_images, image_path)

                    # Apply per-category tag limits right before saving
                    if category_limits and result['success']:
                        result = apply_category_limits(result, category_limits)

                    # Save the tags to a file
                    if result['success']:
                        output_path = save_tags_to_file(
                            image_path=image_path,
                            all_tags=result['all_tags'],
                            custom_dir=save_dir,
                            overwrite=True
                        )
                        result['output_path'] = str(output_path)

                    results[image_path] = result

            processed += batch_size_actual

            # Report batch timing
            batch_time = time.time() - batch_start
            print(f"Batch processed in {batch_time:.2f} seconds ({batch_time/batch_size_actual:.2f} seconds per image)")

        except Exception as e:
            print(f"Error processing batch: {str(e)}")
            traceback.print_exc()

            # Process failed images one by one as fallback.
            # BUG FIX: the original referenced `j` and `result` here before
            # either was defined in this scope (copy-pasted debug/limit code
            # ran before the image was processed), and never applied
            # category_limits to fallback results. Limits are now applied
            # after prediction, mirroring the batch path.
            for j, image_path in enumerate(batch_files):
                try:
                    if progress_callback:
                        progress_callback(processed + j, total_images, image_path)

                    # Preprocess single image
                    img_array = preprocess_image(image_path)

                    # Run inference on single image
                    single_results = tagger.predict_batch(
                        [img_array],
                        threshold=active_threshold,
                        category_thresholds=active_category_thresholds,
                        min_confidence=min_confidence
                    )

                    if single_results:
                        result = single_results[0]

                        # Apply per-category tag limits, as in the batch path
                        if category_limits and result['success']:
                            result = apply_category_limits(result, category_limits)

                        # Save the tags to a file
                        if result['success']:
                            output_path = save_tags_to_file(
                                image_path=image_path,
                                all_tags=result['all_tags'],
                                custom_dir=save_dir,
                                overwrite=True
                            )
                            result['output_path'] = str(output_path)

                        results[image_path] = result
                    else:
                        results[image_path] = {
                            'success': False,
                            'error': 'Failed to process image',
                            'all_tags': []
                        }

                except Exception as img_e:
                    print(f"Error processing single image {image_path}: {str(img_e)}")
                    results[image_path] = {
                        'success': False,
                        'error': str(img_e),
                        'all_tags': []
                    }

                processed += 1

    # Final progress update
    if progress_callback:
        progress_callback(total_images, total_images, None)

    end_time = time.time()
    total_time = end_time - start_time
    print(f"Batch processing finished. Total time: {total_time:.2f} seconds, Average: {total_time/total_images:.2f} seconds per image")

    return {
        'success': True,
        'total': total_images,
        'processed': len(results),
        'results': results,
        'save_dir': save_dir,
        'time_elapsed': end_time - start_time
    }
|
app/utils/ui_components.py
ADDED
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
UI components for the Image Tagger application.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import os
|
6 |
+
import streamlit as st
|
7 |
+
from PIL import Image
|
8 |
+
|
9 |
+
|
10 |
+
def display_progress_bar(prob):
    """
    Render a probability as a small color-coded HTML progress bar.

    Args:
        prob: Probability value between 0 and 1

    Returns:
        HTML string for the progress bar
    """
    # Express the probability as a whole-number percentage
    pct = int(prob * 100)

    # Confidence buckets: high -> green, medium -> orange, low -> red
    bar_color = "green" if prob >= 0.8 else ("orange" if prob >= 0.5 else "red")

    # Styled inline-flex bar with the percentage label on the right
    return f"""
    <div style="margin-bottom: 5px; display: flex; align-items: center;">
        <div style="flex-grow: 1; background-color: #f0f0f0; border-radius: 3px; height: 8px; position: relative;">
            <div style="position: absolute; width: {pct}%; background-color: {bar_color}; height: 8px; border-radius: 3px;"></div>
        </div>
        <div style="margin-left: 8px; min-width: 40px; text-align: right; font-size: 0.9em;">{pct}%</div>
    </div>
    """
|
40 |
+
|
41 |
+
|
42 |
+
def show_example_images(examples_dir):
    """
    Display example images from a directory and let the user pick one.

    Renders a 2-column grid of thumbnails, each with a "Use" button;
    clicking a button selects that image, shows it full-size, and records
    its filename in st.session_state.original_filename.

    Args:
        examples_dir: Directory containing example images

    Returns:
        Selected image path or None
    """
    selected_image = None

    if os.path.exists(examples_dir):
        # Only common raster formats are offered as examples
        example_files = [f for f in os.listdir(examples_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]

        if example_files:
            st.write("Select an example image:")

            # Create a 2-column layout for examples
            example_cols = st.columns(2)

            for i, example_file in enumerate(example_files):
                # Alternate files between the two columns
                col_idx = i % 2
                with example_cols[col_idx]:
                    example_path = os.path.join(examples_dir, example_file)

                    # Display thumbnail
                    try:
                        img = Image.open(example_path)
                        st.image(img, width=150, caption=example_file)

                        # Button to select this example
                        # NOTE(review): on a Streamlit rerun the return value is
                        # only non-None during the run in which the button fires
                        if st.button(f"Use", key=f"example_{i}"):
                            selected_image = example_path
                            # Remember the original filename for later display/saving
                            st.session_state.original_filename = example_file

                            # Display full image
                            st.image(img, use_container_width=True)
                            st.success(f"Example '{example_file}' selected!")
                    except Exception as e:
                        # Unreadable/corrupt image: report it and continue with the rest
                        st.error(f"Error loading {example_file}: {str(e)}")
        else:
            st.info("No example images found.")
            st.write("Add some JPG or PNG images to the 'examples' directory.")
    else:
        st.info("Examples directory not found.")
        st.write("Create an 'examples' directory and add some JPG or PNG images.")

    return selected_image
|
91 |
+
|
92 |
+
|
93 |
+
def display_batch_results(batch_results):
    """
    Display batch processing results.

    Shows an overall success banner, per-image success/failure counts,
    any per-image errors, and previews of up to three saved tag files.

    Args:
        batch_results: Dictionary with batch processing results; expected
            keys: 'success', 'processed', 'total', 'results' (per-image dicts
            with 'success' and optionally 'error'/'output_path').
    """
    if batch_results['success']:
        st.success(f"✅ Processed {batch_results['processed']} of {batch_results['total']} images")

        # Show details in an expander
        with st.expander("Batch Processing Results", expanded=True):
            # Count successes and failures
            successes = sum(1 for r in batch_results['results'].values() if r['success'])
            failures = batch_results['total'] - successes

            st.write(f"- Successfully tagged: {successes}")
            st.write(f"- Failed to process: {failures}")

            if failures > 0:
                # Show errors for each failed image
                st.write("### Processing Errors")
                for img_path, result in batch_results['results'].items():
                    if not result['success']:
                        st.write(f"- **{os.path.basename(img_path)}**: {result.get('error', 'Unknown error')}")

            # Show the location of the output files
            if successes > 0:
                st.write("### Output Files")
                st.write(f"Tag files have been saved to the 'saved_tags' folder.")

                # Show the first few (up to 3) as examples
                st.write("Example outputs:")
                sample_results = [(path, res) for path, res in batch_results['results'].items() if res['success']][:3]
                for img_path, result in sample_results:
                    output_path = result.get('output_path', '')
                    # Only preview files that actually exist on disk
                    if output_path and os.path.exists(output_path):
                        st.write(f"- **{os.path.basename(output_path)}**")

                        # Show file contents in a code block
                        with open(output_path, 'r', encoding='utf-8') as f:
                            content = f.read()
                        st.code(content, language='text')
    else:
        st.error(f"Batch processing failed: {batch_results.get('error', 'Unknown error')}")
|
game/dev_tools.py
ADDED
@@ -0,0 +1,580 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Developer Tools for Tag Collector Game
|
4 |
+
A hidden panel with tools for testing and debugging game features.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import streamlit as st
|
8 |
+
import random
|
9 |
+
import time
|
10 |
+
from game_constants import (
|
11 |
+
TAG_CURRENCY_NAME,
|
12 |
+
RARITY_LEVELS,
|
13 |
+
ACHIEVEMENTS,
|
14 |
+
)
|
15 |
+
|
16 |
+
from tag_categories import (
|
17 |
+
TAG_CATEGORIES,
|
18 |
+
TAG_DETECTOR_UPGRADES,
|
19 |
+
PROGRESSION_ACHIEVEMENTS
|
20 |
+
)
|
21 |
+
|
22 |
+
def display_dev_tools():
    """
    Display the developer tools interface.

    This should be hidden in production builds or behind a developer toggle.
    """
    st.title("🛠️ Developer Tools")
    st.warning("These tools are for testing and debugging only. They can break game balance!")

    # Table-driven tab layout: label -> renderer, in display order
    tab_specs = [
        ("Resources", display_resource_tools),
        ("Tag Management", display_tag_tools),
        ("Progression", display_progression_tools),
        ("Mosaic Tools", display_mosaic_tools),
        ("Reset Tools", display_reset_tools),
    ]

    tabs = st.tabs([label for label, _ in tab_specs])
    for tab, (_, render) in zip(tabs, tab_specs):
        with tab:
            render()
|
49 |
+
|
50 |
+
def display_resource_tools():
    """Render developer controls for currency, detection threshold and tag power."""
    st.subheader("Currency and Resources")

    # --- Grant TagCoins -------------------------------------------------
    input_col, action_col = st.columns([3, 1])
    with input_col:
        amount = st.number_input("Amount of TagCoins to add:", min_value=0, max_value=1000000, value=1000, step=100)
    with action_col:
        if st.button("Add TagCoins", key="add_currency"):
            st.session_state.tag_currency += amount
            st.session_state.game_stats["total_currency_earned"] += amount
            st.success(f"Added {amount} {TAG_CURRENCY_NAME}!")

    # --- Override detection threshold ------------------------------------
    input_col, action_col = st.columns([3, 1])
    with input_col:
        threshold = st.slider("Set threshold value:", min_value=0.0, max_value=1.0, value=st.session_state.threshold, step=0.01)
    with action_col:
        if st.button("Set Threshold", key="set_threshold"):
            st.session_state.threshold = threshold
            st.success(f"Set threshold to {threshold:.2f}")

    # --- Grant tag power bonus --------------------------------------------
    input_col, action_col = st.columns([3, 1])
    with input_col:
        power = st.number_input("Add tag power bonus:", min_value=0.0, max_value=0.1, value=0.01, step=0.001, format="%.3f")
    with action_col:
        if st.button("Add Power", key="add_power"):
            # Lazily initialise the bonus the first time it is granted
            if not hasattr(st.session_state, 'tag_power_bonus'):
                st.session_state.tag_power_bonus = 0
            st.session_state.tag_power_bonus += power
            st.success(f"Added {power:.3f} tag power!")
|
83 |
+
|
84 |
+
def display_tag_tools():
    """Display developer tools for adding specific or randomly generated tags."""
    st.subheader("Tag Management")

    # Add specific tag
    with st.expander("Add Specific Tag", expanded=True):
        col1, col2, col3 = st.columns([4, 2, 1])

        with col1:
            tag_name = st.text_input("Tag name:", value="custom_tag")

        with col2:
            rarities = list(RARITY_LEVELS.keys())
            rarity = st.selectbox("Rarity:", rarities)

        with col3:
            # Get categories from session state or fallback to general
            categories = ["general", "character", "copyright", "meta", "rating", "artist", "year"]
            category = st.selectbox("Category:", categories)

        if st.button("Add Tag", key="add_specific_tag"):
            # Check if tag already exists
            is_new = tag_name not in st.session_state.collected_tags

            if is_new:
                # BUG FIX: only create the entry for new tags. The original
                # unconditionally overwrote an existing entry (resetting its
                # count, rarity, category and discovery_time) before the
                # else-branch incremented the freshly-reset count.
                st.session_state.collected_tags[tag_name] = {
                    "count": 1,
                    "rarity": rarity,
                    "category": category,
                    "discovery_time": time.strftime("%Y-%m-%d %H:%M:%S")
                }
                st.success(f"Added new tag '{tag_name}' ({rarity}, {category})")
            else:
                # Existing tag: preserve its metadata, just bump the count
                st.session_state.collected_tags[tag_name]["count"] += 1
                st.info(f"Incremented count for existing tag '{tag_name}'")

    # Generate random tags
    with st.expander("Generate Random Tags", expanded=False):
        col1, col2 = st.columns([3, 1])

        with col1:
            num_tags = st.number_input("Number of random tags to generate:", min_value=1, max_value=1000, value=10)

            # Options for distribution
            advanced = st.checkbox("Advanced options")
            if advanced:
                st.write("Rarity distribution:")
                common_pct = st.slider("Common tags %:", 0, 100, 70)
                uncommon_pct = st.slider("Uncommon tags %:", 0, 100, 20)
                rare_pct = st.slider("Rare tags %:", 0, 100, 8)
                super_rare_pct = st.slider("Super rare tags %:", 0, 100, 2)

                # Ensure total is 100%
                total = common_pct + uncommon_pct + rare_pct + super_rare_pct
                if total != 100:
                    st.warning(f"Distribution totals {total}%, should be 100%")

        with col2:
            generate_button = st.button("Generate", key="generate_random_tags")

        if generate_button:
            generated_count = 0

            # Determine distribution of rarities. `advanced` is evaluated
            # first, so `total` is only read when the sliders exist.
            if advanced and total == 100:
                # Custom distribution, split across the in-game rarity tiers
                rarity_weights = {
                    "Whispered Word": common_pct / 100,
                    "Common Canard": uncommon_pct / 100 * 0.6,
                    "Urban Footnote": uncommon_pct / 100 * 0.4,
                    "Urban Myth": rare_pct / 100 * 0.5,
                    "Urban Legend": rare_pct / 100 * 0.5,
                    "Urban Nightmare": super_rare_pct / 100 * 0.8,
                    "Impuritas Civitas": super_rare_pct / 100 * 0.2
                }
            else:
                # Default distribution
                rarity_weights = {
                    "Whispered Word": 0.70,
                    "Common Canard": 0.15,
                    "Urban Footnote": 0.08,
                    "Urban Myth": 0.04,
                    "Urban Legend": 0.02,
                    "Urban Nightmare": 0.008,
                    "Impuritas Civitas": 0.002
                }

            # Generate the tags
            for i in range(num_tags):
                # Create a random tag name (no metadata available here)
                tag_name = f"random_tag_{int(time.time() % 10000)}_{i}"

                # Determine rarity via weighted random choice
                rarity = random.choices(
                    list(rarity_weights.keys()),
                    weights=list(rarity_weights.values()),
                    k=1
                )[0]

                # Determine category
                categories = list(TAG_CATEGORIES.keys())
                category = random.choice(categories)

                # Add to collection
                timestamp = time.strftime("%Y-%m-%d %H:%M:%S")

                # Check if this is a new tag
                is_new = tag_name not in st.session_state.collected_tags

                if is_new:
                    st.session_state.collected_tags[tag_name] = {
                        "count": 1,
                        "rarity": rarity,
                        "category": category,
                        "discovery_time": timestamp
                    }
                    generated_count += 1
                else:
                    # Increment count if already exists
                    st.session_state.collected_tags[tag_name]["count"] += 1

            # Show confirmation (counts only newly created tags)
            st.success(f"Generated {generated_count} new random tags!")
|
210 |
+
|
211 |
+
def display_progression_tools():
    """Display developer tools for categories, detector level and achievements."""
    st.subheader("Progression System Tools")

    # Unlock categories
    with st.expander("Unlock Categories", expanded=True):
        st.write("Select categories to unlock:")

        # Get currently unlocked categories
        unlocked = []
        if hasattr(st.session_state, 'unlocked_tag_categories'):
            unlocked = st.session_state.unlocked_tag_categories

        # Display each purchasable category with a checkbox
        category_checkboxes = {}
        for category, info in TAG_CATEGORIES.items():
            # Skip default unlocked categories
            if info["unlocked_by_default"]:
                continue

            # Pre-check the box if the category is already unlocked
            is_unlocked = category in unlocked
            category_checkboxes[category] = st.checkbox(
                f"{info['name']} ({info['cost']} {TAG_CURRENCY_NAME})",
                value=is_unlocked,
                key=f"cat_{category}"
            )

        # Button to apply changes
        if st.button("Apply Category Changes", key="apply_categories"):
            # Initialize if needed
            if not hasattr(st.session_state, 'unlocked_tag_categories'):
                st.session_state.unlocked_tag_categories = []

            # BUG FIX: only add default categories that are not already
            # present. The original appended them unconditionally, so every
            # click grew the list with duplicate entries.
            for cat, info in TAG_CATEGORIES.items():
                if info["unlocked_by_default"] and cat not in st.session_state.unlocked_tag_categories:
                    st.session_state.unlocked_tag_categories.append(cat)

            # Update unlocked categories from the checkboxes
            for category, checked in category_checkboxes.items():
                # If checked but not unlocked, add it
                if checked and category not in st.session_state.unlocked_tag_categories:
                    st.session_state.unlocked_tag_categories.append(category)
                    st.success(f"Unlocked {TAG_CATEGORIES[category]['name']}!")

                # If unchecked but unlocked, remove it
                elif not checked and category in st.session_state.unlocked_tag_categories:
                    st.session_state.unlocked_tag_categories.remove(category)
                    st.info(f"Locked {TAG_CATEGORIES[category]['name']}")

    # Upgrade detector level
    with st.expander("Set Detector Level", expanded=False):
        # Get current level (defaults to 0 when not yet initialised)
        current_level = 0
        if hasattr(st.session_state, 'detector_level'):
            current_level = st.session_state.detector_level

        # Display slider for detector level
        new_level = st.slider(
            "Detector Level:",
            min_value=0,
            max_value=len(TAG_DETECTOR_UPGRADES) - 1,
            value=current_level
        )

        # Show info about selected level (max_tags == 0 means unlimited)
        upgrade = TAG_DETECTOR_UPGRADES[new_level]
        max_tags = upgrade["max_tags"]
        if max_tags == 0:
            st.write(f"Selected: {upgrade['name']} (Unlimited tags)")
        else:
            st.write(f"Selected: {upgrade['name']} ({max_tags} tags)")

        # Button to apply changes
        if st.button("Set Detector Level", key="set_detector_level"):
            st.session_state.detector_level = new_level
            st.success(f"Set detector level to {new_level} ({upgrade['name']})")

    # Unlock achievements
    with st.expander("Manage Achievements", expanded=False):
        # Combine standard and progression achievements
        all_achievements = {**ACHIEVEMENTS, **PROGRESSION_ACHIEVEMENTS}

        # Initialize achievements if needed
        if not hasattr(st.session_state, 'achievements'):
            st.session_state.achievements = set()

        # Create tabs for unlocked and locked
        unlocked_tab, locked_tab = st.tabs(["Unlocked", "Locked"])

        with unlocked_tab:
            st.write("Currently unlocked achievements:")

            # Show unlocked achievements with option to remove
            for achievement_id in sorted(st.session_state.achievements):
                if achievement_id in all_achievements:
                    col1, col2 = st.columns([3, 1])

                    with col1:
                        achievement = all_achievements[achievement_id]
                        st.write(f"**{achievement['name']}**: {achievement['description']}")

                    with col2:
                        if st.button("Remove", key=f"remove_{achievement_id}"):
                            st.session_state.achievements.remove(achievement_id)
                            st.info(f"Removed achievement: {achievement['name']}")
                            st.rerun()

        with locked_tab:
            st.write("Currently locked achievements:")

            # Show locked achievements with option to add
            locked_achievements = [a for a in all_achievements if a not in st.session_state.achievements]

            for achievement_id in sorted(locked_achievements):
                col1, col2 = st.columns([3, 1])

                with col1:
                    achievement = all_achievements[achievement_id]
                    st.write(f"**{achievement['name']}**: {achievement['description']}")

                with col2:
                    if st.button("Unlock", key=f"unlock_{achievement_id}"):
                        st.session_state.achievements.add(achievement_id)

                        # Apply rewards if applicable
                        if "reward" in achievement:
                            from scan_handler import apply_achievement_reward
                            apply_achievement_reward(achievement_id, achievement["reward"])

                        st.success(f"Unlocked achievement: {achievement['name']}")
                        st.rerun()
|
344 |
+
|
345 |
+
def display_mosaic_tools():
    """
    Display tools for managing the tag mosaic.

    Requires st.session_state.tag_mosaic to exist (created by the Tag
    Collection tab); bails out with a warning otherwise.
    """
    st.subheader("Tag Mosaic Tools")

    # Check if mosaic exists
    has_mosaic = hasattr(st.session_state, 'tag_mosaic')

    if not has_mosaic:
        st.warning("Tag Mosaic not initialized yet. Visit the Tag Collection tab first.")
        return

    # Fill random portions of the mosaic
    with st.expander("Fill Random Portions", expanded=True):
        col1, col2 = st.columns([3, 1])

        with col1:
            # Target completion percentage for the mosaic as a whole
            fill_percentage = st.slider(
                "Percentage to fill:",
                min_value=0,
                max_value=100,
                value=10,
                step=1
            )

            # Options for distribution
            st.write("Fill with tags of rarity:")
            fill_rarities = {}
            for rarity in RARITY_LEVELS:
                fill_rarities[rarity] = st.checkbox(rarity, value=True, key=f"fill_{rarity}")

        with col2:
            fill_button = st.button("Fill Mosaic", key="fill_mosaic")

        if fill_button:
            # Get the mosaic from session state
            mosaic = st.session_state.tag_mosaic

            # Calculate how many cells to fill: the slider sets a target
            # overall completion, so subtract what is already filled
            total_cells = mosaic.total_cells
            existing_filled = len(mosaic.filled_cells)
            target_filled = int(total_cells * fill_percentage / 100)
            cells_to_add = max(0, target_filled - existing_filled)

            # Get active rarities; bail out early if none selected
            active_rarities = [r for r, checked in fill_rarities.items() if checked]
            if not active_rarities:
                st.error("Select at least one rarity to fill with")
                return

            # Create artificial tags and add them
            added_count = 0
            added_tags = {}

            # Generate random positions
            all_positions = [(x, y) for x in range(mosaic.grid_width) for y in range(mosaic.grid_height)]
            # Remove already filled positions
            available_positions = [pos for pos in all_positions if pos not in mosaic.filled_cells]

            # If we need more than available, just use what's available
            cells_to_add = min(cells_to_add, len(available_positions))

            # Randomly select positions (without replacement)
            selected_positions = random.sample(available_positions, cells_to_add)

            # Create one synthetic tag per selected position; the (x, y) in
            # the name keeps names unique within this batch
            for pos in selected_positions:
                x, y = pos

                # Create a tag name
                tag_name = f"mosaic_fill_{x}_{y}_{int(time.time() % 10000)}"

                # Select a random rarity from active rarities
                rarity = random.choice(active_rarities)

                # Add to tags dictionary (this won't be saved to session_state)
                added_tags[tag_name] = {
                    "count": 1,
                    "rarity": rarity,
                    "category": "general"
                }

                added_count += 1

            # Update the mosaic (this does save to disk)
            if added_count > 0:
                mosaic.update_with_tags(added_tags)
                st.success(f"Added {added_count} random cells to the mosaic!")

                # Show updated stats
                stats = mosaic.get_stats()
                st.write(f"New completion: {stats['completion_percentage']:.2f}%")
                st.write(f"Emerging pattern: {stats['completion_pattern']}")

                # Show image
                mosaic_img = mosaic.get_image(show_highlights=True)
                st.image(mosaic_img, caption="Updated Mosaic", width=400)
            else:
                st.info("No new cells added. Mosaic may already be filled to the requested level.")

    # Reset mosaic without affecting collection
    with st.expander("Reset Mosaic", expanded=False):
        if st.button("Reset Mosaic", key="reset_mosaic"):
            # Confirm
            # NOTE(review): this checkbox only appears after the button fires,
            # so resetting takes two interactions — verify this is intended
            confirm = st.checkbox("I understand this will clear the mosaic visualization (not your collection)")

            if confirm:
                # Get the mosaic from session state
                mosaic = st.session_state.tag_mosaic

                # Reset the mosaic by creating a new one
                from tag_mosaic import TagMosaic
                st.session_state.tag_mosaic = TagMosaic()

                # Delete the mosaic save file
                import os
                if os.path.exists("tag_mosaic.png"):
                    try:
                        os.remove("tag_mosaic.png")
                    except Exception as e:
                        st.error(f"Error removing mosaic file: {e}")

                st.success("Mosaic has been reset!")
|
467 |
+
|
def display_reset_tools():
    """Display developer tools for resetting currency, collection, or all progress.

    Three expanders: a currency reset (no confirmation needed), a collection
    reset that can preserve selected rarities, and a full game reset.

    Bug fix: the destructive resets are now actually reachable.  The original
    code created the confirmation checkboxes *inside* the ``if st.button(...)``
    branch; Streamlit reruns the whole script on every widget interaction, so
    by the time the user could check the box the button press was no longer
    active and the guarded branch never executed.  Checkboxes are therefore
    rendered BEFORE their trigger buttons and the button action is gated on
    their persisted values.
    """
    st.subheader("Reset Tools")
    st.warning("These tools will reset parts of your game progress. Use with caution!")

    # Reset currency -- non-destructive enough that no confirmation is required.
    with st.expander("Reset Currency", expanded=False):
        col1, col2 = st.columns([3, 1])

        with col1:
            new_amount = st.number_input("Set currency to:", min_value=0, value=0)

        with col2:
            if st.button("Reset Currency", key="reset_currency"):
                st.session_state.tag_currency = new_amount
                st.success(f"Reset currency to {new_amount} {TAG_CURRENCY_NAME}")

    # Reset collection (optionally preserving chosen rarities).
    with st.expander("Reset Collection", expanded=False):
        st.write("This will remove all collected tags or specific rarities.")

        # Options to keep certain rarities
        st.write("Keep tags with these rarities:")
        keep_rarities = {}
        for rarity in RARITY_LEVELS:
            keep_rarities[rarity] = st.checkbox(rarity, value=False, key=f"keep_{rarity}")

        # Confirmation rendered before the button so its state survives the
        # rerun triggered by the button press (see docstring).
        confirm = st.checkbox(
            "I understand this will delete collected tags",
            key="confirm_reset_collection",
        )

        if st.button("Reset Collection", key="reset_collection"):
            if not confirm:
                st.error("Please check the confirmation box first.")
            else:
                # Get rarities to keep
                rarities_to_keep = [r for r, checked in keep_rarities.items() if checked]

                if rarities_to_keep:
                    # Keep only tags whose rarity was marked for preservation.
                    kept_tags = {
                        tag: info
                        for tag, info in st.session_state.collected_tags.items()
                        if info.get("rarity") in rarities_to_keep
                    }

                    # Replace the collection
                    removed_count = len(st.session_state.collected_tags) - len(kept_tags)
                    st.session_state.collected_tags = kept_tags
                    st.success(f"Removed {removed_count} tags. Kept {len(kept_tags)} tags with rarities: {', '.join(rarities_to_keep)}")
                else:
                    # Remove all tags
                    removed_count = len(st.session_state.collected_tags)
                    st.session_state.collected_tags = {}
                    st.success(f"Removed all {removed_count} tags from your collection")

    # Reset complete game
    with st.expander("Reset ENTIRE Game", expanded=False):
        st.error("This will reset ALL game progress including collection, currency, achievements, and upgrades.")

        # Double confirmation, rendered before the trigger button (see docstring).
        confirm1 = st.checkbox("I understand ALL progress will be lost", key="confirm_reset_all_1")
        confirm2 = st.checkbox("This cannot be undone", key="confirm_reset_all_2")

        if st.button("Reset EVERYTHING", key="reset_everything"):
            if not (confirm1 and confirm2):
                st.error("Please check both confirmation boxes first.")
            else:
                # Reset core progression state.
                # NOTE(review): game_constants.STARTING_THRESHOLD is 0.55 but
                # this reset uses 0.25 -- confirm which default is intended.
                st.session_state.threshold = 0.25  # Default starting threshold
                st.session_state.tag_currency = 0
                st.session_state.collected_tags = {}
                st.session_state.purchased_upgrades = []
                st.session_state.achievements = set()
                st.session_state.tag_history = []
                st.session_state.current_scan = None
                st.session_state.game_stats = {
                    "images_processed": 0,
                    "total_tags_found": 0,
                    "total_currency_earned": 0,
                    "currency_spent": 0
                }

                # Reset progression: re-seed the default-unlocked categories.
                if hasattr(st.session_state, 'unlocked_tag_categories'):
                    st.session_state.unlocked_tag_categories = [
                        cat for cat, info in TAG_CATEGORIES.items()
                        if info["unlocked_by_default"]
                    ]

                if hasattr(st.session_state, 'detector_level'):
                    st.session_state.detector_level = 0

                if hasattr(st.session_state, 'tag_power_bonus'):
                    st.session_state.tag_power_bonus = 0

                if hasattr(st.session_state, 'coin_multiplier'):
                    st.session_state.coin_multiplier = 1.0

                if hasattr(st.session_state, 'essence_generator_count'):
                    st.session_state.essence_generator_count = 0

                # Reset mosaic: remove the saved image and recreate the object.
                import os
                if os.path.exists("tag_mosaic.png"):
                    try:
                        os.remove("tag_mosaic.png")
                    except Exception as e:
                        st.error(f"Error removing mosaic file: {e}")

                if hasattr(st.session_state, 'tag_mosaic'):
                    from tag_mosaic import TagMosaic
                    st.session_state.tag_mosaic = TagMosaic()

                st.success("Game completely reset to initial state!")
                st.info("Refresh the page to see changes take effect")
game/essence_generator.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
game/game.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
game/game_constants.py
ADDED
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env python3
"""
Game Constants for Tag Collector Game - Updated with new tag rarity names
This file contains shared constants used by both the main game and the library system.
"""

# Game currency names.  The currency name strings are interpolated directly
# into UI messages, so changing them changes visible text everywhere.
TAG_CURRENCY_NAME = "TagCoins"
ENKEPHALIN_CURRENCY_NAME = "Enkephalin"
ENKEPHALIN_ICON = "💧"  # emoji displayed next to Enkephalin amounts

# Scanner confidence-threshold bounds (lower threshold = more tags detected).
# NOTE(review): the dev tools' full game reset sets threshold to 0.25 as the
# "default starting threshold", which disagrees with 0.55 here -- confirm
# which value is intended.
STARTING_THRESHOLD = 0.55
MIN_THRESHOLD = 0.1
# Tag animations and theme settings.
# Maps rarity name -> CSS class name plus the CSS (keyframes + class rules)
# injected into the page to animate tags of that rarity.  Only the two top
# rarities have animations; lower tiers are styled statically elsewhere.
TAG_ANIMATIONS = {
    "Star of the City": {
        "css_class": "star-of-city",
        "animation": """
        @keyframes glowing {
            0% { box-shadow: 0 0 5px #FFD700; }
            50% { box-shadow: 0 0 20px #FFD700; }
            100% { box-shadow: 0 0 5px #FFD700; }
        }
        .star-of-city {
            background-color: rgba(255, 215, 0, 0.2);
            padding: 8px;
            border-radius: 5px;
            border: 2px solid gold;
            animation: glowing 2s infinite;
        }
        """
    },
    "Impuritas Civitas": {
        "css_class": "impuritas-civitas",
        "animation": """
        @keyframes rainbow-border {
            0% { border-color: red; }
            14% { border-color: orange; }
            28% { border-color: yellow; }
            42% { border-color: green; }
            57% { border-color: blue; }
            71% { border-color: indigo; }
            85% { border-color: violet; }
            100% { border-color: red; }
        }

        @keyframes rainbow-text {
            0% { color: red; }
            14% { color: orange; }
            28% { color: yellow; }
            42% { color: green; }
            57% { color: blue; }
            71% { color: indigo; }
            85% { color: violet; }
            100% { color: red; }
        }

        @keyframes rainbow-bg {
            0% { background-color: rgba(255,0,0,0.1); }
            14% { background-color: rgba(255,165,0,0.1); }
            28% { background-color: rgba(255,255,0,0.1); }
            42% { background-color: rgba(0,128,0,0.1); }
            57% { background-color: rgba(0,0,255,0.1); }
            71% { background-color: rgba(75,0,130,0.1); }
            85% { background-color: rgba(238,130,238,0.1); }
            100% { background-color: rgba(255,0,0,0.1); }
        }

        .impuritas-civitas {
            background-color: rgba(0, 0, 0, 0.1);
            padding: 10px;
            border-radius: 5px;
            border: 3px solid red;
            animation: rainbow-border 4s linear infinite, rainbow-bg 4s linear infinite;
        }

        .impuritas-text {
            font-weight: bold;
            animation: rainbow-text 4s linear infinite;
        }
        """
    }
}
# Rarity levels with appropriate colors (updated to match new rarity tiers).
# Ordered from most to least common; insertion order matters because callers
# iterate this dict to render tiers in ascending-rarity order.
# "color" is the display hex color; "value" is the tier's base worth
# (presumably in TagCoins -- confirm against the reward calculation).
RARITY_LEVELS = {
    "Canard": {"color": "#AAAAAA", "value": 1},              # Gray
    "Urban Myth": {"color": "#5D9C59", "value": 5},          # Green
    "Urban Legend": {"color": "#2196F3", "value": 10},       # Blue
    "Urban Plague": {"color": "#9C27B0", "value": 25},       # Purple
    "Urban Nightmare": {"color": "#FF9800", "value": 50},    # Orange
    "Star of the City": {"color": "#FFEB3B", "value": 250},  # Yellow/Gold
    "Impuritas Civitas": {"color": "#F44336", "value": 1000} # Red
}
# Essence generation costs in enkephalin.
# Maps rarity name -> Enkephalin price to generate one essence for a tag of
# that rarity.  Keys must match RARITY_LEVELS exactly.
ESSENCE_COSTS = {
    "Canard": 10,             # Common tags
    "Urban Myth": 30,         # Uncommon tags
    "Urban Legend": 75,       # Rare tags
    "Urban Plague": 150,      # Very rare tags
    "Urban Nightmare": 300,   # Extremely rare tags
    "Star of the City": 600,  # Nearly mythical tags
    "Impuritas Civitas": 1200 # Legendary tags
}
# Tag power system.
# Per-rarity bonuses granted by owning a tag of that tier:
#   "coin_multiplier"   -- bonus to the coin multiplier (presumably additive
#                          per tag; confirm against the multiplier calculation)
#   "enkephalin_reward" -- flat Enkephalin granted
# The two lowest tiers intentionally grant nothing.
TAG_POWER_BONUSES = {
    "Canard": {"coin_multiplier": 0, "enkephalin_reward": 0},
    "Urban Myth": {"coin_multiplier": 0, "enkephalin_reward": 0},
    "Urban Legend": {"coin_multiplier": 0, "enkephalin_reward": 1},
    "Urban Plague": {"coin_multiplier": 0.001, "enkephalin_reward": 3},
    "Urban Nightmare": {"coin_multiplier": 0.0025, "enkephalin_reward": 5},
    "Star of the City": {"coin_multiplier": 0.005, "enkephalin_reward": 10},
    "Impuritas Civitas": {"coin_multiplier": 0.01, "enkephalin_reward": 25}
}
# Purchasable scanner upgrades, ordered by increasing cost.
# Each upgrade sets the detection threshold to "threshold_setting" (lower
# threshold = more tags detected) for "cost" units of currency.  Several
# thresholds are calibrated points (optimal F1, weighted F1, high recall)
# per the descriptions below.
THRESHOLD_UPGRADES = [
    {
        "name": "Pattern Recognition Module",
        "threshold_setting": 0.48367345,  # High precision threshold
        "cost": 300,
        "description": "Basic algorithm focused on high-precision identification. Reduces false positives but may miss some tags."
    },
    {
        "name": "Neural Network Enhancement",
        "threshold_setting": 0.40000000,
        "cost": 500,
        "description": "Improved tag detection using multi-layer perceptrons. Offers good precision with moderate recall."
    },
    {
        "name": "Deep Learning Framework",
        "threshold_setting": 0.35000000,
        "cost": 1000,
        "description": "Advanced algorithms that learn from previous scans. Provides better balance between precision and recall."
    },
    {
        "name": "Quantum Probability Engine",
        "threshold_setting": 0.32857141,  # Balanced optimal F1 score threshold
        "cost": 2500,
        "description": "Leverages quantum uncertainty for optimal detection balance. Perfect calibration point for F1 score."
    },
    {
        "name": "Recursive Self-Improvement",
        "threshold_setting": 0.31224489,  # Weighted F1 threshold
        "cost": 7500,
        "description": "Scanner enhances its own detection capabilities. Optimized for weighted tag discovery."
    },
    {
        "name": "Consciousness Emulation",
        "threshold_setting": 0.25000000,
        "cost": 15000,
        "description": "Scanner develops intuition-like abilities. Favors higher recall while maintaining reasonable precision."
    },
    {
        "name": "Technological Singularity",
        "threshold_setting": 0.20612246,  # High recall threshold
        "cost": 50000,
        "description": "The scanner transcends conventional limitations. Maximizes tag discovery at the cost of some precision."
    }
]
# Achievements.
# Maps achievement id -> {"name", "description", "reward", and usually
# "requirement"}.  NOTE: some entries (e.g. "perfect_scanner",
# "rarity_hunter") have no "requirement" key -- consumers must use
# .get("requirement") rather than direct indexing.  Reward keys seen here:
# coin_bonus, enkephalin, tagcoins, essence_cost_reduction,
# library_cost_reduction, enkephalin_bonus.
ACHIEVEMENTS = {
    # Collection achievements
    "tag_collector_beginner": {"name": "Novice Archivist", "requirement": 50, "description": "Collect 50 different tags", "reward": {"coin_bonus": 0.01}},
    "tag_collector_expert": {"name": "Senior Cataloger", "requirement": 250, "description": "Collect 250 different tags", "reward": {"coin_bonus": 0.01}},
    "tag_collector_master": {"name": "Master Librarian", "requirement": 500, "description": "Collect 500 different tags", "reward": {"coin_bonus": 0.01}},
    "tag_master": {"name": "Grand Archivist", "requirement": 1000, "description": "Collect 1000 different tags", "reward": {"coin_bonus": 0.01}},

    # Rarity achievements
    "legendary_hunter": {"name": "Impuritas Seeker", "requirement": 1, "description": "Find your first Impuritas Civitas tag", "reward": {"coin_bonus": 0.01, "enkephalin": 50}},
    "multi_legendary": {"name": "Forbidden Collection", "requirement": 5, "description": "Collect 5 Impuritas Civitas tags", "reward": {"coin_bonus": 0.01, "enkephalin": 100}},
    "canard_collector": {"name": "Canard Chronicler", "requirement": 30, "description": "Collect 30 Canard tags", "reward": {"coin_bonus": 0.01}},
    "urban_myth_collector": {"name": "Myth Curator", "requirement": 15, "description": "Collect 15 Urban Myth tags", "reward": {"coin_bonus": 0.01}},
    "urban_legend_collector": {"name": "Legend Preserver", "requirement": 10, "description": "Collect 10 Urban Legend tags", "reward": {"coin_bonus": 0.01}},
    "urban_plague_collector": {"name": "Plague Archivist", "requirement": 5, "description": "Collect 5 Urban Plague tags", "reward": {"coin_bonus": 0.01}},
    "urban_nightmare_collector": {"name": "Nightmare Keeper", "requirement": 5, "description": "Collect 5 Urban Nightmare tags", "reward": {"coin_bonus": 0.01}},
    "star_collector": {"name": "Star Collector", "requirement": 3, "description": "Collect 3 Star of the City tags", "reward": {"coin_bonus": 0.01, "enkephalin": 30}},
    "impuritas_collector": {"name": "Impuritas Scholar", "requirement": 3, "description": "Collect 3 Impuritas Civitas tags", "reward": {"coin_bonus": 0.01, "enkephalin": 75}},

    # Progress achievements
    "perfect_scanner": {"name": "Omniscient Observer", "description": "Reach the minimum threshold", "reward": {"coin_bonus": 0.01}},
    "optimal_threshold": {"name": "Perfect Calibration", "description": "Reach the optimal F1 score threshold of 0.328", "reward": {"coin_bonus": 0.01}},
    "collection_milestone_100": {"name": "Century Collector", "requirement": 100, "description": "Collect 100 different tags", "reward": {"tagcoins": 100, "coin_bonus": 0.01}},
    "collection_milestone_1000": {"name": "Millennium Collector", "requirement": 1000, "description": "Collect 1000 different tags", "reward": {"tagcoins": 1000, "coin_bonus": 0.01}},
    "collection_milestone_5000": {"name": "Epic Collector", "requirement": 5000, "description": "Collect 5000 different tags", "reward": {"tagcoins": 5000, "coin_bonus": 0.01}},

    # Essence & library achievements
    "essence_creator": {"name": "Essence Creator", "requirement": 5, "description": "Generate 5 tag essences", "reward": {"essence_cost_reduction": 0.2, "coin_bonus": 0.01}},
    "tag_explorer": {"name": "Tag Explorer", "requirement": 20, "description": "Explore all library tiers", "reward": {"library_cost_reduction": 0.15, "coin_bonus": 0.01}},
    "enkephalin_master": {"name": "Enkephalin Master", "requirement": 5000, "description": "Generate 5000 Enkephalin", "reward": {"essence_cost_reduction": 0.25, "coin_bonus": 0.01}},
    "sacrifice_devotee": {"name": "Sacrifice Devotee", "requirement": 100, "description": "Sacrifice 100 tags", "reward": {"enkephalin_bonus": 0.2, "coin_bonus": 0.01}},

    # New achievements
    "category_explorer": {"name": "Category Explorer", "requirement": 10, "description": "Collect tags from 10 different categories", "reward": {"coin_bonus": 0.01}},
    "series_collector": {"name": "Series Collector", "requirement": 3, "description": "Complete 3 series mosaics", "reward": {"coin_bonus": 0.01, "enkephalin": 25}},
    "rapid_tagger": {"name": "Rapid Tagger", "requirement": 100, "description": "Scan 100 images", "reward": {"coin_bonus": 0.01}},
    "enkephalin_harvester": {"name": "Enkephalin Harvester", "requirement": 1000, "description": "Generate 1000 Enkephalin", "reward": {"enkephalin_bonus": 0.1, "coin_bonus": 0.01}},
    "library_scholar": {"name": "Library Scholar", "requirement": 50, "description": "Extract 50 tags from the library", "reward": {"library_cost_reduction": 0.1, "coin_bonus": 0.01}},
    "rarity_hunter": {"name": "Rarity Hunter", "description": "Find tags of all rarity levels", "reward": {"coin_bonus": 0.02}},
    "essence_master": {"name": "Essence Master", "requirement": 25, "description": "Generate 25 tag essences", "reward": {"essence_cost_reduction": 0.15, "coin_bonus": 0.01}},
    "legendary_librarian": {"name": "Legendary Librarian", "description": "Extract an Impuritas Civitas tag from the library", "reward": {"library_cost_reduction": 0.2, "coin_bonus": 0.01, "enkephalin": 100}}
}
game/library_system.py
ADDED
@@ -0,0 +1,2010 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Redesigned Library System with Instant Discovery and Cooldown for Tag Collector Game
|
3 |
+
"""
|
4 |
+
|
5 |
+
import streamlit as st
|
6 |
+
import random
|
7 |
+
import time
|
8 |
+
import math
|
9 |
+
import pandas as pd
|
10 |
+
import datetime
|
11 |
+
from game_constants import (
|
12 |
+
TAG_CURRENCY_NAME,
|
13 |
+
RARITY_LEVELS,
|
14 |
+
ENKEPHALIN_CURRENCY_NAME,
|
15 |
+
ENKEPHALIN_ICON,
|
16 |
+
TAG_POWER_BONUSES
|
17 |
+
)
|
18 |
+
from essence_generator import display_essence_generator
|
19 |
+
from tag_categories import (
|
20 |
+
TAG_CATEGORIES,
|
21 |
+
get_collection_power_level
|
22 |
+
)
|
23 |
+
import tag_storage
|
24 |
+
|
# Define library properties.
# Top-level metadata for the Library feature as a whole; per-floor settings
# live in LIBRARY_FLOORS below.  "odds_multiplier" presumably scales the
# chance of rarer discoveries -- confirm against the expedition roll logic.
LIBRARY_INFO = {
    "name": "The Library",
    "description": "A vast repository of knowledge where tags are discovered through patient exploration and research.",
    "color": "#4A148C",  # Deep purple
    "rarities_available": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City", "Impuritas Civitas"],
    "odds_multiplier": 2.0
}
# Define library floors with their unlocking requirements and rarity boosts.
# Ordered from most accessible to most exclusive.  Each floor unlocks once the
# player's collection reaches "required_tags"; "rarities" lists which tiers the
# floor can yield and "odds_multiplier"/"rarity_boost" tilt discoveries toward
# rarer tiers.  (The inline "Unlocked after ..." comments below have been
# corrected to match the actual required_tags values.)
LIBRARY_FLOORS = [
    {
        "name": "Floor of General Works",
        "description": "The foundation of knowledge. Contains basic tags with limited rarity.",
        "required_tags": 0,  # Available from the start
        "rarity_boost": 0.0,
        "color": "#8D99AE",  # Light blue-gray
        "unlocked": True,  # Always unlocked; other floors omit this key
        "rarities": ["Canard", "Urban Myth", "Urban Legend"],
        "odds_multiplier": 1.0  # Base odds multiplier
    },
    {
        "name": "Floor of History",
        "description": "Archives of past knowledge. Offers more access to uncommon tags.",
        "required_tags": 25,  # Unlocked after collecting 25 tags
        "rarity_boost": 0.2,
        "color": "#457B9D",  # Moderate blue
        "rarities": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague"],
        "odds_multiplier": 1.2
    },
    {
        "name": "Floor of Technological Sciences",
        "description": "Repository of technical knowledge. Access to rare tags begins here.",
        "required_tags": 75,  # Unlocked after collecting 75 tags
        "rarity_boost": 0.4,
        "color": "#2B9348",  # Green
        "rarities": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague"],
        "odds_multiplier": 1.5
    },
    {
        "name": "Floor of Literature",
        "description": "A vast collection of narrative concepts. Higher chance of rare discoveries.",
        "required_tags": 150,  # Unlocked after collecting 150 tags
        "rarity_boost": 0.6,
        "color": "#6A0572",  # Purple
        "rarities": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare"],
        "odds_multiplier": 1.8
    },
    {
        # NOTE(review): the description says "First access to Urban Nightmare
        # tags", but Literature above already offers that tier -- confirm.
        "name": "Floor of Art",
        "description": "The realm of aesthetic concepts. First access to Urban Nightmare tags.",
        "required_tags": 250,  # Unlocked after collecting 250 tags
        "rarity_boost": 0.8,
        "color": "#D90429",  # Red
        "rarities": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare"],
        "odds_multiplier": 2.2
    },
    {
        "name": "Floor of Natural Sciences",
        "description": "Where empirical knowledge is cataloged. Significant chance of very rare tags.",
        "required_tags": 500,  # Unlocked after collecting 500 tags
        "rarity_boost": 1.0,
        "color": "#1A759F",  # Deep blue
        "rarities": ["Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City"],
        "odds_multiplier": 2.5
    },
    {
        # NOTE(review): description claims "first glimpse" of Star of the City,
        # but Natural Sciences above already offers that tier -- confirm.
        "name": "Floor of Language",
        "description": "The domain of linguistic concepts. First glimpse of Star of the City tags.",
        "required_tags": 1000,  # Unlocked after collecting 1000 tags
        "rarity_boost": 1.2,
        "color": "#FF8C00",  # Orange
        "rarities": ["Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City"],
        "odds_multiplier": 3.0
    },
    {
        "name": "Floor of Social Sciences",
        "description": "Complex social patterns and abstractions. Notable chance of exceptional rarities.",
        "required_tags": 2000,  # Unlocked after collecting 2000 tags
        "rarity_boost": 1.4,
        "color": "#76B041",  # Brighter green
        "rarities": ["Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City"],
        "odds_multiplier": 3.5
    },
    {
        "name": "Floor of Philosophy",
        "description": "The realm of profound thought. First access to the rarest 'Impuritas Civitas' tags.",
        "required_tags": 5000,  # Unlocked after collecting 5000 tags
        "rarity_boost": 1.6,
        "color": "#7209B7",  # Deep purple
        "rarities": ["Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City", "Impuritas Civitas"],
        "odds_multiplier": 5.0
    },
    {
        "name": "Floor of Religion",
        "description": "The ultimate repository of the most profound conceptual territories.",
        "required_tags": 10000,
        "rarity_boost": 2.0,
        "color": "#FFBD00",  # Gold
        "rarities": ["Urban Plague", "Urban Nightmare", "Star of the City", "Impuritas Civitas"],
        "odds_multiplier": 10.0
    }
]
128 |
+
|
129 |
+
def start_instant_expedition():
    """
    Run an expedition immediately, enforcing a cooldown between runs.

    Returns:
        List of discoveries, or None when the cooldown has not yet expired.
    """
    now = time.time()

    # Refuse to run while the previous expedition's cooldown is still active.
    if hasattr(st.session_state, 'last_expedition_time'):
        since_last = now - st.session_state.last_expedition_time
        cooldown = calculate_expedition_duration()
        if since_last < cooldown:
            mins, secs = divmod(int(cooldown - since_last), 60)
            st.error(f"Expedition on cooldown. {mins:02d}:{secs:02d} remaining.")
            return None

    # Roll the discoveries, then open a fresh cooldown window.
    found = generate_expedition_discoveries()
    st.session_state.last_expedition_time = now

    # Persist progress right away.
    tag_storage.save_game(st.session_state)

    # Ensure the library tab index exists so the UI stays on the current tab.
    if 'library_tab_index' not in st.session_state:
        st.session_state.library_tab_index = 0

    return found
def generate_expedition_discoveries():
    """
    Generate expedition discoveries instantly.

    Picks the player's highest unlocked library floor, then draws
    `calculate_expedition_capacity()` tags: for each draw a rarity is chosen
    via weighted random selection from `calculate_rarity_odds()`, and a tag of
    that rarity is picked — preferring undiscovered tags, falling back to
    already-discovered ones, then to the model's full tag list, and finally to
    a hard-coded generic list. Side effects: records each tag via
    `tag_storage.add_discovered_tag`, bumps `library_growth` counters, and
    saves both library state and the full game at the end.

    Returns:
        List of discovery dicts with keys: tag, rarity, is_new, timestamp, library.
    """
    # Get current library floor
    current_floor = None
    collection_size = len(st.session_state.collected_tags) if hasattr(st.session_state, 'collected_tags') else 0

    if hasattr(st.session_state, 'library_floors'):
        # Find the highest unlocked floor (floors are ordered low-to-high,
        # so scan from the top down and stop at the first one unlocked).
        for floor in reversed(st.session_state.library_floors):
            if collection_size >= floor["required_tags"]:
                current_floor = floor
                break

    # Default to first floor if we couldn't find one
    if not current_floor:
        current_floor = st.session_state.library_floors[0] if hasattr(st.session_state, 'library_floors') else {
            "name": "Archival Records",
            "rarities": ["Canard", "Urban Myth"],
            "rarity_boost": 0.0
        }

    # Calculate rarity odds for discoveries
    rarity_odds = calculate_rarity_odds()

    # Calculate capacity from upgrades
    tags_capacity = calculate_expedition_capacity()

    # Generate discoveries
    discoveries = []
    for _ in range(tags_capacity):
        # Select a rarity based on calculated odds
        rarities = list(rarity_odds.keys())
        weights = list(rarity_odds.values())
        selected_rarity = random.choices(rarities, weights=weights, k=1)[0]

        # Now select a random tag with this rarity that hasn't been discovered yet
        possible_tags = []

        # Check if we have tag metadata with rarity info
        if hasattr(st.session_state, 'tag_rarity_metadata') and st.session_state.tag_rarity_metadata:
            # Find all tags of the selected rarity
            for tag, tag_info in st.session_state.tag_rarity_metadata.items():
                # Skip if already discovered
                if tag in st.session_state.discovered_tags:
                    continue

                # Handle both formats - new (dict with rarity) and old (just rarity string)
                if isinstance(tag_info, dict) and "rarity" in tag_info:
                    if tag_info["rarity"] == selected_rarity:
                        possible_tags.append(tag)
                elif tag_info == selected_rarity:
                    possible_tags.append(tag)

        # If no undiscovered tags found in the selected rarity, fallback to already discovered tags
        # (a re-discovery: add_discovered_tag reports is_new=False for these).
        if not possible_tags:
            if hasattr(st.session_state, 'tag_rarity_metadata') and st.session_state.tag_rarity_metadata:
                for tag, tag_info in st.session_state.tag_rarity_metadata.items():
                    # Skip if in different rarity
                    tag_rarity = tag_info.get("rarity", tag_info) if isinstance(tag_info, dict) else tag_info
                    if tag_rarity != selected_rarity:
                        continue

                    possible_tags.append(tag)

        # If still no tags found (or no metadata), create a fallback
        if not possible_tags:
            # If we have the model's full tag list, use it
            if hasattr(st.session_state, 'metadata') and 'idx_to_tag' in st.session_state.metadata:
                all_tags = list(st.session_state.metadata['idx_to_tag'].values())
                # Just pick a random tag and assign the selected rarity
                # (the tag's true rarity is unknown here, so the rolled one is used).
                possible_tags = random.sample(all_tags, min(20, len(all_tags)))
            else:
                # Complete fallback - use some generic tags
                possible_tags = ["portrait", "landscape", "digital_art", "anime", "realistic",
                                 "fantasy", "sci-fi", "city", "nature", "character"]

        # If we found possible tags, select one randomly
        if possible_tags:
            selected_tag = random.choice(possible_tags)

            # Get category from metadata if available
            category = "unknown"
            if hasattr(st.session_state, 'metadata') and 'tag_to_category' in st.session_state.metadata:
                if selected_tag in st.session_state.metadata['tag_to_category']:
                    category = st.session_state.metadata['tag_to_category'][selected_tag]

            # Use the enhanced tag storage function to add the discovered tag
            is_new = tag_storage.add_discovered_tag(
                tag=selected_tag,
                rarity=selected_rarity,
                session_state=st.session_state,
                library_floor=current_floor["name"],
                category=category  # Pass the category we found
            )

            # Record for library growth
            st.session_state.library_growth["total_discoveries"] += 1
            st.session_state.library_growth["last_discovery_time"] = time.time()

            # Create timestamp for display
            timestamp = time.strftime("%Y-%m-%d %H:%M:%S")

            # Add to results
            discoveries.append({
                "tag": selected_tag,
                "rarity": selected_rarity,
                "is_new": is_new,
                "timestamp": timestamp,
                "library": current_floor["name"]
            })

    # Save the game state after discoveries
    tag_storage.save_library_state(session_state=st.session_state)
    tag_storage.save_game(st.session_state)

    return discoveries
def update_discovered_tag_categories():
    """Fill in missing ('unknown') categories on discovered tags from available metadata.

    Returns:
        Number of tags whose category was updated.
    """
    discovered = getattr(st.session_state, 'discovered_tags', None)
    if not discovered:
        return 0

    fixed = 0

    # Pass 1: the model metadata's tag -> category mapping.
    if hasattr(st.session_state, 'metadata') and 'tag_to_category' in st.session_state.metadata:
        mapping = st.session_state.metadata['tag_to_category']
        for name, entry in discovered.items():
            if entry.get('category', 'unknown') == 'unknown' and name in mapping:
                entry['category'] = mapping[name]
                fixed += 1

    # Pass 2: the rarity metadata, whose dict entries may also carry a category.
    if hasattr(st.session_state, 'tag_rarity_metadata') and st.session_state.tag_rarity_metadata:
        rarity_meta = st.session_state.tag_rarity_metadata
        for name, entry in discovered.items():
            if entry.get('category', 'unknown') == 'unknown' and name in rarity_meta:
                meta = rarity_meta[name]
                if isinstance(meta, dict) and "category" in meta:
                    entry['category'] = meta["category"]
                    fixed += 1

    # Persist only when something actually changed.
    if fixed > 0:
        print(f"Updated categories for {fixed} discovered tags")
        tag_storage.save_library_state(session_state=st.session_state)

    return fixed
def calculate_expedition_duration():
    """
    Compute the post-expedition cooldown in seconds, shortened by speed upgrades.

    Returns:
        Cooldown duration in seconds (never below 1).
    """
    base_seconds = 10  # Default to 10 seconds

    # Read the player's speed upgrade level (defaults to 1 when absent).
    level = 1
    if hasattr(st.session_state, 'library_upgrades'):
        level = st.session_state.library_upgrades.get("speed", 1)

    # Every level past the first cuts the cooldown by 10%, compounding.
    scaled = base_seconds * (0.9 ** (level - 1))

    # Clamp so the cooldown can never drop below one second.
    return max(1, scaled)
def calculate_expedition_capacity():
    """
    Compute how many tags a single expedition discovers.

    Returns:
        Number of tag discoveries per expedition (at least 1).
    """
    # Capacity upgrade level; defaults to 1 when no upgrades are tracked yet.
    level = 1
    if hasattr(st.session_state, 'library_upgrades'):
        level = st.session_state.library_upgrades.get("capacity", 1)

    # One base discovery, plus one extra per upgrade level beyond the first.
    return 1 + (level - 1)
def calculate_rarity_odds():
    """
    Calculate rarity odds based on library floor level and upgrades.

    Combines the current floor's intrinsic rarity boost with the player's
    rarity upgrade level, reweights the floor's available rarities
    accordingly, and normalizes to probabilities.

    Returns:
        Dictionary of {rarity: probability} for the rarities available on the
        highest unlocked floor. Values sum to 1.0 (empty dict if the floor
        exposes no rarities).
    """
    # Determine the highest floor the player's collection has unlocked.
    current_floor = None
    if hasattr(st.session_state, 'library_floors'):
        collection_size = len(st.session_state.collected_tags) if hasattr(st.session_state, 'collected_tags') else 0

        # Floors are ordered lowest-to-highest; scan from the top down.
        for floor in reversed(st.session_state.library_floors):
            if collection_size >= floor["required_tags"]:
                current_floor = floor
                break

    # Default to first floor if we couldn't find one
    if not current_floor:
        current_floor = st.session_state.library_floors[0] if hasattr(st.session_state, 'library_floors') else {
            "rarities": ["Canard", "Urban Myth"],
            "rarity_boost": 0.0,
            "odds_multiplier": 1.0
        }

    # Get available rarities from current floor
    available_rarities = current_floor.get("rarities", ["Canard", "Urban Myth"])
    odds_multiplier = current_floor.get("odds_multiplier", 1.0)

    # Baseline weights across the full rarity ladder (most common -> rarest).
    base_weights = {
        "Canard": 70,
        "Urban Myth": 20,
        "Urban Legend": 7,
        "Urban Plague": 2,
        "Urban Nightmare": 1,
        "Star of the City": 0.1,
        "Impuritas Civitas": 0.01
    }

    # Combine the floor's intrinsic boost with the player's rarity upgrades.
    floor_rarity_boost = current_floor.get("rarity_boost", 0.0)
    rarity_level = 1
    if hasattr(st.session_state, 'library_upgrades'):
        rarity_level = st.session_state.library_upgrades.get("rarity", 1)
    upgrade_rarity_boost = (rarity_level - 1) * 0.2  # Each level gives 20% more chance for rare tags
    total_boost = floor_rarity_boost + upgrade_rarity_boost

    # Adjust weights: boosts shrink the common rarities and inflate rare ones.
    # BUG FIX: clamp every weight to a small positive floor. With high floor
    # boosts (up to 2.0) plus unbounded upgrade boosts, the (1.0 - boost * k)
    # factors can go negative, and random.choices() in the caller raises
    # ValueError on non-positive total weights.
    min_weight = 0.01
    adjusted_weights = {}
    for rarity in available_rarities:
        if rarity == "Canard":
            # Reduce common tag odds as rarity level increases
            weight = base_weights[rarity] * (1.0 - total_boost * 0.7)
        elif rarity == "Urban Myth":
            # Slight reduction for uncommon as rarity level increases
            weight = base_weights[rarity] * (1.0 - total_boost * 0.3)
        else:
            # Increase rare tag odds as rarity level increases;
            # higher rarities receive proportionally larger boosts.
            rarity_index = list(RARITY_LEVELS.keys()).index(rarity)
            boost_factor = 1.0 + (total_boost * odds_multiplier * (rarity_index + 1))
            weight = base_weights[rarity] * boost_factor
        adjusted_weights[rarity] = max(min_weight, weight)

    # Normalize weights to probabilities.
    total = sum(adjusted_weights.values())
    normalized_weights = {r: w / total for r, w in adjusted_weights.items()}

    return normalized_weights
def format_time_remaining(seconds):
    """
    Format seconds into a human-readable time remaining format.

    Args:
        seconds: Seconds remaining (int or float, assumed non-negative).

    Returns:
        String with formatted time: "N seconds", "N minutes", "N hours",
        or "N hours, M minutes".
    """
    if seconds < 60:
        return f"{int(seconds)} seconds"
    if seconds < 3600:
        return f"{int(seconds // 60)} minutes"

    # BUG FIX: truncate to whole minutes BEFORE testing for the two-part
    # format. The original compared the fractional minute count (> 0), so
    # e.g. 3630 seconds rendered as "1 hours, 0 minutes".
    hours = int(seconds // 3600)
    minutes = int((seconds % 3600) // 60)
    if minutes > 0:
        return f"{hours} hours, {minutes} minutes"
    return f"{hours} hours"
def display_cooldown_timer():
    """Display a countdown timer until the next expedition is available.

    Returns:
        True while the cooldown is still running, False otherwise.
    """
    now = time.time()
    duration = calculate_expedition_duration()

    # Work out how much cooldown time is left, if any.
    remaining = 0
    if hasattr(st.session_state, 'last_expedition_time'):
        elapsed = now - st.session_state.last_expedition_time
        if elapsed < duration:
            remaining = duration - elapsed

    # Nothing to render once the cooldown has expired.
    if remaining <= 0:
        return False  # Not on cooldown

    mins, secs = divmod(int(remaining), 60)

    # Dark-mode styled countdown box.
    st.markdown("""
    <div style="background-color: rgba(255, 152, 0, 0.15);
                border: 1px solid #FF9800;
                border-radius: 5px;
                padding: 10px;
                text-align: center;
                margin-bottom: 15px;
                color: #ffffff;">
        <p style="margin: 0; font-weight: bold;">⏱️ Next expedition available in:</p>
        <p style="font-size: 1.2em; margin: 5px 0;">{:02d}:{:02d}</p>
    </div>
    """.format(mins, secs), unsafe_allow_html=True)

    # Manual refresh so the user can update the countdown display.
    if st.button("🔄 Refresh Timer", key="refresh_timer"):
        st.rerun()

    return True  # Still on cooldown
def display_library_exploration_interface():
    """Display the unified interface for library exploration using Streamlit elements.

    Renders the current floor banner, expedition stats (cooldown, capacity,
    per-rarity odds bars), the cooldown timer, the "Start Expedition" button,
    and finally the upgrades panel via display_library_upgrades()
    (defined elsewhere in this module).
    """
    # Tag collection progress
    tag_count = len(st.session_state.collected_tags) if hasattr(st.session_state, 'collected_tags') else 0

    # Check if we have tags to start exploring
    if not hasattr(st.session_state, 'collected_tags') or not st.session_state.collected_tags:
        st.warning("Start scanning images to collect tags first. The library will grow as you collect more tags!")
        return

    # Get current library floor
    current_floor = None
    if hasattr(st.session_state, 'library_floors'):
        # Find the highest unlocked floor (floors are ordered low-to-high).
        for floor in reversed(st.session_state.library_floors):
            if tag_count >= floor["required_tags"]:
                current_floor = floor
                break

    # Default to first floor if we couldn't find one
    if not current_floor:
        current_floor = st.session_state.library_floors[0] if hasattr(st.session_state, 'library_floors') else {
            "name": "Floor of General Works",
            "description": "The foundational level of knowledge.",
            "color": "#607D8B",
            "rarities": ["Canard", "Urban Myth"]
        }

    # Library growth progress
    total_discoveries = st.session_state.library_growth["total_discoveries"]

    # Create container with colored border for current floor
    floor_container = st.container()
    with floor_container:
        # Use a stylized container with dark mode theme.
        # The floor's "#RRGGBB" color is split into int components to build
        # a translucent rgba() background matching the border color.
        st.markdown(f"""
        <div style="border-left: 5px solid {current_floor['color']};
                    border-radius: 5px;
                    background-color: rgba({int(current_floor['color'][1:3], 16)},
                                          {int(current_floor['color'][3:5], 16)},
                                          {int(current_floor['color'][5:7], 16)}, 0.15);
                    padding: 15px 10px 10px 15px;
                    margin-bottom: 15px;
                    color: #ffffff;">
            <h3 style="margin-top: 0; color: {current_floor['color']};">{current_floor['name']}</h3>
            <p>{current_floor['description']}</p>
            <p>Total Discoveries: <strong>{total_discoveries}</strong></p>
        </div>
        """, unsafe_allow_html=True)

    # Create a nice divider for dark theme
    st.markdown("<hr style='margin: 20px 0; border: 0; height: 1px; background-image: linear-gradient(to right, rgba(255, 255, 255, 0), rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0));'>", unsafe_allow_html=True)

    # Display expedition details
    st.subheader("Expedition Details")

    # Calculate capacity
    capacity = calculate_expedition_capacity()

    # Two columns for expedition stats
    col1, col2 = st.columns(2)

    with col1:
        # Expedition duration/timer
        cooldown_duration = calculate_expedition_duration()
        st.write(f"📊 Cooldown: {format_time_remaining(cooldown_duration)}")
        st.write(f"🔍 Tag Discoveries: {capacity} per expedition")

    with col2:
        # Calculate and display rarity odds with Streamlit elements
        rarity_odds = calculate_rarity_odds()
        available_rarities = current_floor.get("rarities", ["Canard", "Urban Myth"])

        # Display rarity chances with dark theme styling.
        # Each rarity gets a labeled progress bar; bar width is percentage*5
        # (capped at 100) so small probabilities remain visible.
        for rarity in available_rarities:
            if rarity in rarity_odds:
                color = RARITY_LEVELS[rarity]["color"]
                percentage = rarity_odds[rarity]*100

                # Custom styling based on rarity
                if rarity == "Impuritas Civitas":
                    # Rainbow-animated label and gradient bar (CSS keyframes
                    # are injected by display_expedition_results).
                    st.markdown(f"""
                    <div style="display: flex; align-items: center; margin-bottom: 5px;">
                        <span style="animation: rainbow-text 4s linear infinite; font-weight: bold; width: 140px;">{rarity}:</span>
                        <div style="flex-grow: 1; background-color: #2c2c2c; border-radius: 5px; height: 10px;">
                            <div style="width: {min(percentage*5, 100)}%; background: linear-gradient(to right, red, orange, yellow, green, blue, indigo, violet); height: 10px; border-radius: 5px;"></div>
                        </div>
                        <span style="margin-left: 10px;">{percentage:.1f}%</span>
                    </div>
                    """, unsafe_allow_html=True)
                elif rarity == "Star of the City":
                    # Gold-glow styling.
                    st.markdown(f"""
                    <div style="display: flex; align-items: center; margin-bottom: 5px;">
                        <span style="color:{color}; text-shadow: 0 0 3px gold; font-weight: bold; width: 140px;">{rarity}:</span>
                        <div style="flex-grow: 1; background-color: #2c2c2c; border-radius: 5px; height: 10px;">
                            <div style="width: {min(percentage*5, 100)}%; background-color: {color}; box-shadow: 0 0 5px gold; height: 10px; border-radius: 5px;"></div>
                        </div>
                        <span style="margin-left: 10px;">{percentage:.1f}%</span>
                    </div>
                    """, unsafe_allow_html=True)
                elif rarity == "Urban Nightmare":
                    # Pulsing orange styling.
                    st.markdown(f"""
                    <div style="display: flex; align-items: center; margin-bottom: 5px;">
                        <span style="color:{color}; text-shadow: 0 0 1px #FF5722; font-weight: bold; width: 140px;">{rarity}:</span>
                        <div style="flex-grow: 1; background-color: #2c2c2c; border-radius: 5px; height: 10px;">
                            <div style="width: {min(percentage*5, 100)}%; background-color: {color}; animation: pulse-bar 3s infinite; height: 10px; border-radius: 5px;"></div>
                        </div>
                        <span style="margin-left: 10px;">{percentage:.1f}%</span>
                    </div>
                    """, unsafe_allow_html=True)
                else:
                    # Plain bar in the rarity's own color.
                    st.markdown(f"""
                    <div style="display: flex; align-items: center; margin-bottom: 5px;">
                        <span style="color:{color}; font-weight: bold; width: 140px;">{rarity}:</span>
                        <div style="flex-grow: 1; background-color: #2c2c2c; border-radius: 5px; height: 10px;">
                            <div style="width: {min(percentage*5, 100)}%; background-color: {color}; height: 10px; border-radius: 5px;"></div>
                        </div>
                        <span style="margin-left: 10px;">{percentage:.1f}%</span>
                    </div>
                    """, unsafe_allow_html=True)

    # Check for cooldown and display timer if needed
    on_cooldown = display_cooldown_timer()

    # Add button to start expedition regardless of cooldown status
    # The button will always be displayed, but if on cooldown, expedition won't start
    if st.button("🚀 Start Expedition", key="start_expedition", use_container_width=True, disabled=on_cooldown):
        if not on_cooldown:
            discoveries = start_instant_expedition()
            if discoveries:
                # Store discovery results for display
                st.session_state.expedition_results = discoveries
                # Show success message
                st.success(f"Expedition completed! Discovered {len(discoveries)} new tags!")
                # Show balloons for celebration
                st.balloons()
                # Display the results
                display_expedition_results(discoveries)
                # Save state
                tag_storage.save_game(st.session_state)
        else:
            # This should not be reached due to disabled button, but just in case
            st.error("Expedition on cooldown. Please wait until the timer expires.")

    # Display library upgrades
    display_library_upgrades()
def display_expedition_results(results):
    """Display results from completed expeditions using Streamlit elements with enhanced dark-mode visuals.

    Args:
        results: List of discovery dicts as produced by
            generate_expedition_discoveries() (keys: tag, rarity, is_new,
            timestamp, library).

    Groups discoveries by rarity, shows a celebratory banner per rarity tier
    (rarest first), then renders each tag as a styled card in a 3-column grid.
    Also injects the CSS animations/classes used by the cards and by the
    rarity bars elsewhere in this module.
    """
    st.subheader("Expedition Discoveries")

    # Add animations CSS for dark theme
    st.markdown("""
    <style>
    @keyframes rainbow-text {
        0% { color: red; }
        14% { color: orange; }
        28% { color: yellow; }
        42% { color: green; }
        57% { color: blue; }
        71% { color: indigo; }
        85% { color: violet; }
        100% { color: red; }
    }

    @keyframes rainbow-border {
        0% { border-color: red; }
        14% { border-color: orange; }
        28% { border-color: yellow; }
        42% { border-color: green; }
        57% { border-color: blue; }
        71% { border-color: indigo; }
        85% { border-color: violet; }
        100% { border-color: red; }
    }

    @keyframes star-glow {
        0% { box-shadow: 0 0 5px #FFD700; }
        50% { box-shadow: 0 0 15px #FFD700; }
        100% { box-shadow: 0 0 5px #FFD700; }
    }

    @keyframes nightmare-pulse {
        0% { border-color: #FF9800; }
        50% { border-color: #FF5722; }
        100% { border-color: #FF9800; }
    }

    @keyframes pulse-bar {
        0% { opacity: 0.8; }
        50% { opacity: 1; }
        100% { opacity: 0.8; }
    }

    .expedition-tag-impuritas {
        animation: rainbow-text 4s linear infinite;
        font-weight: bold;
    }

    .expedition-card-impuritas {
        background-color: rgba(255, 0, 0, 0.15);
        border-radius: 8px;
        border: 3px solid red;
        padding: 12px;
        animation: rainbow-border 4s linear infinite;
        color: #ffffff;
    }

    .expedition-card-star {
        background-color: rgba(255, 215, 0, 0.15);
        border-radius: 8px;
        border: 2px solid gold;
        padding: 12px;
        animation: star-glow 2s infinite;
        color: #ffffff;
    }

    .expedition-card-nightmare {
        background-color: rgba(255, 152, 0, 0.15);
        border-radius: 8px;
        border: 2px solid #FF9800;
        padding: 12px;
        animation: nightmare-pulse 3s infinite;
        color: #ffffff;
    }

    .expedition-card-plague {
        background-color: rgba(156, 39, 176, 0.12);
        border-radius: 8px;
        border: 1px solid #9C27B0;
        padding: 12px;
        box-shadow: 0 0 3px #9C27B0;
        color: #ffffff;
    }

    .expedition-card-legend {
        background-color: rgba(33, 150, 243, 0.15);
        border-radius: 8px;
        border: 1px solid #2196F3;
        padding: 12px;
        color: #ffffff;
    }

    .expedition-card-myth {
        background-color: rgba(76, 175, 80, 0.15);
        border-radius: 8px;
        border: 1px solid #4CAF50;
        padding: 12px;
        color: #ffffff;
    }

    .expedition-card-canard {
        background-color: rgba(170, 170, 170, 0.15);
        border-radius: 8px;
        border: 1px solid #AAAAAA;
        padding: 12px;
        color: #ffffff;
    }
    </style>
    """, unsafe_allow_html=True)

    # Group by rarity first for better organization
    results_by_rarity = {}
    for result in results:
        rarity = result["rarity"]
        if rarity not in results_by_rarity:
            results_by_rarity[rarity] = []
        results_by_rarity[rarity].append(result)

    # Get ordered rarities (rarest first)
    ordered_rarities = list(RARITY_LEVELS.keys())
    ordered_rarities.reverse()  # Reverse to display rarest first

    # Display rare discoveries first
    for rarity in ordered_rarities:
        if rarity not in results_by_rarity:
            continue

        rarity_results = results_by_rarity[rarity]
        if not rarity_results:
            continue

        color = RARITY_LEVELS[rarity]["color"]

        # Special styling for rare discoveries: an animated banner per tier.
        if rarity == "Impuritas Civitas":
            st.markdown(f"""
            <div style="background-color: rgba(255, 0, 0, 0.15); border-radius: 10px; padding: 15px; margin-bottom: 20px; border: 2px solid red; animation: rainbow-border 4s linear infinite; color: #ffffff;">
                <h3 style="margin-top: 0; animation: rainbow-text 4s linear infinite;">✨ EXTRAORDINARY DISCOVERY!</h3>
                <p>You found {len(rarity_results)} Impuritas Civitas tag(s)!</p>
            </div>
            """, unsafe_allow_html=True)
            st.balloons()  # Add celebration effect
        elif rarity == "Star of the City":
            st.markdown(f"""
            <div style="background-color: rgba(255, 215, 0, 0.15); border-radius: 10px; padding: 15px; margin-bottom: 20px; border: 2px solid gold; animation: star-glow 2s infinite; color: #ffffff;">
                <h3 style="margin-top: 0; color: {color}; text-shadow: 0 0 3px gold;">🌟 EXCEPTIONAL DISCOVERY!</h3>
                <p>You found {len(rarity_results)} Star of the City tag(s)!</p>
            </div>
            """, unsafe_allow_html=True)
        elif rarity == "Urban Nightmare":
            st.markdown(f"""
            <div style="background-color: rgba(255, 152, 0, 0.15); border-radius: 10px; padding: 15px; margin-bottom: 20px; border: 2px solid #FF9800; animation: nightmare-pulse 3s infinite; color: #ffffff;">
                <h3 style="margin-top: 0; color: {color}; text-shadow: 0 0 1px #FF5722;">👑 RARE DISCOVERY!</h3>
                <p>You found {len(rarity_results)} Urban Nightmare tag(s)!</p>
            </div>
            """, unsafe_allow_html=True)
        elif rarity == "Urban Plague":
            st.markdown(f"""
            <div style="background-color: rgba(156, 39, 176, 0.12); border-radius: 10px; padding: 15px; margin-bottom: 20px; border: 1px solid #9C27B0; box-shadow: 0 0 3px #9C27B0; color: #ffffff;">
                <h3 style="margin-top: 0; color: {color}; text-shadow: 0 0 1px #9C27B0;">⚔️ UNCOMMON DISCOVERY!</h3>
                <p>You found {len(rarity_results)} Urban Plague tag(s)!</p>
            </div>
            """, unsafe_allow_html=True)
        else:
            st.markdown(f"### {rarity} ({len(rarity_results)} discoveries)")

        # Display tags in this rarity, laid out three cards per row.
        cols = st.columns(3)
        for i, result in enumerate(rarity_results):
            col_idx = i % 3
            with cols[col_idx]:
                tag = result["tag"]
                floor_name = result.get("library", "Library")

                # Get the appropriate card class based on rarity.
                # NOTE(review): this maps e.g. "Star of the City" to
                # "expedition-card-star-of-the-city", while the CSS above
                # defines short names like "expedition-card-star" — presumably
                # unmatched classes just fall back to unstyled divs; confirm.
                rarity_class = rarity.lower().replace(' ', '-')
                card_class = f"expedition-card-{rarity_class}"

                # Create styled card for each tag
                tag_html = f"""<div class="{card_class}">"""

                # Special styling for the tag name based on rarity
                if rarity == "Impuritas Civitas":
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span class="expedition-tag-impuritas">✨ {tag}</span></p>"""
                elif rarity == "Star of the City":
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span style="color: {color}; text-shadow: 0 0 3px gold; font-weight: bold;">🌟 {tag}</span></p>"""
                elif rarity == "Urban Nightmare":
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span style="color: {color}; text-shadow: 0 0 1px #FF5722; font-weight: bold;">👑 {tag}</span></p>"""
                elif rarity == "Urban Plague":
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span style="color: {color}; text-shadow: 0 0 1px #9C27B0; font-weight: bold;">⚔️ {tag}</span></p>"""
                else:
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span style="color: {color}; font-weight: bold;">{tag}</span></p>"""

                # Mark as new if it is
                is_new = result.get("is_new", False)
                new_badge = """<span style="background-color: #4CAF50; color: white; padding: 2px 6px; border-radius: 10px; font-size: 0.7em; margin-left: 5px;">NEW</span>""" if is_new else ""

                # Add other tag details
                tag_html += f"""
                <p style="margin: 0; font-size: 0.9em;">Found in: {floor_name} {new_badge}</p>
                <p style="margin: 5px 0 0 0; font-size: 0.9em;">Rarity: <span style="color: {color};">{rarity}</span></p>
                </div>
                """

                st.markdown(tag_html, unsafe_allow_html=True)

        # Add separator between rarity groups
        st.markdown("<hr style='margin: 20px 0; border: 0; height: 1px; background-image: linear-gradient(to right, rgba(255, 255, 255, 0), rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0));'>", unsafe_allow_html=True)
def display_library_building():
    """Display a visual representation of the library building with all floors.

    Reads from st.session_state:
        collected_tags: dict of tags the player has scanned (size gates floors).
        library_floors: ordered list of floor dicts with "name", "description",
            "required_tags", "color" (hex "#RRGGBB"), "rarities", "rarity_boost".

    Renders the building (roof, floors top-to-bottom, entrance) as HTML via
    st.markdown, plus a "Floor Details" expander with a summary table.
    """
    st.subheader("The Great Library Building")

    # Collection size drives which floors are unlocked.
    collection_size = len(st.session_state.collected_tags) if hasattr(st.session_state, 'collected_tags') else 0

    # Current floor = highest floor whose requirement is met.
    current_floor_index = 0
    for i, floor in enumerate(st.session_state.library_floors):
        if collection_size >= floor["required_tags"]:
            current_floor_index = i

    total_floors = len(st.session_state.library_floors)

    # Enhanced CSS for the library building with dark theme
    st.markdown("""
    <style>
    @keyframes floor-glow {
        0% { box-shadow: 0 0 5px rgba(13, 110, 253, 0.5); }
        50% { box-shadow: 0 0 15px rgba(13, 110, 253, 0.8); }
        100% { box-shadow: 0 0 5px rgba(13, 110, 253, 0.5); }
    }

    .library-roof {
        background: linear-gradient(90deg, #8D6E63, #A1887F);
        height: 35px;
        width: 90%;
        margin: 0 auto;
        border-radius: 8px 8px 0 0;
        display: flex;
        justify-content: center;
        align-items: center;
        color: white;
        font-weight: bold;
        box-shadow: 0 -2px 10px rgba(0,0,0,0.4);
    }

    .library-floor {
        height: 65px;
        width: 80%;
        margin: 0 auto;
        border: 1px solid #444;
        display: flex;
        align-items: center;
        padding: 0 20px;
        position: relative;
        transition: all 0.3s ease;
        color: #ffffff;
        background-color: #2c2c2c;
    }

    .library-floor:hover {
        transform: translateX(10px);
    }

    .library-floor.current {
        box-shadow: 0 0 10px rgba(13, 110, 253, 0.5);
        z-index: 2;
        animation: floor-glow 2s infinite;
        border-left: 5px solid #0d6efd;
    }

    .library-floor.locked {
        background-color: #1e1e1e;
        color: #777;
        filter: grayscale(50%);
    }

    .library-floor-number {
        position: absolute;
        left: -30px;
        width: 25px;
        height: 25px;
        background-color: #0d6efd;
        color: white;
        border-radius: 50%;
        display: flex;
        justify-content: center;
        align-items: center;
        font-weight: bold;
    }

    .library-floor.locked .library-floor-number {
        background-color: #555;
    }

    .library-entrance {
        background: linear-gradient(90deg, #5D4037, #795548);
        height: 45px;
        width: 35%;
        margin: 0 auto;
        border-radius: 10px 10px 0 0;
        display: flex;
        justify-content: center;
        align-items: center;
        color: white;
        font-weight: bold;
        box-shadow: 0 -2px 10px rgba(0,0,0,0.4);
    }

    .library-floor-details {
        flex: 1;
    }

    .library-floor-name {
        font-weight: bold;
        margin: 0;
    }

    .library-floor-description {
        font-size: 0.85em;
        margin: 3px 0 0 0;
        opacity: 0.9;
    }

    .library-floor-status {
        display: flex;
        align-items: center;
        font-weight: bold;
    }

    .library-floor-rarities {
        font-size: 0.8em;
        margin-top: 4px;
    }

    .rarity-dot {
        display: inline-block;
        width: 8px;
        height: 8px;
        border-radius: 50%;
        margin-right: 3px;
    }

    /* Special animations for rarer floors */
    .library-floor.star {
        background-color: rgba(255, 215, 0, 0.15);
    }

    .library-floor.impuritas {
        background-color: rgba(255, 0, 0, 0.15);
    }

    @keyframes rainbow-border {
        0% { border-color: red; }
        14% { border-color: orange; }
        28% { border-color: yellow; }
        42% { border-color: green; }
        57% { border-color: blue; }
        71% { border-color: indigo; }
        85% { border-color: violet; }
        100% { border-color: red; }
    }

    .rainbow-border {
        animation: rainbow-border 4s linear infinite;
    }
    </style>
    """, unsafe_allow_html=True)

    # Roof
    st.markdown('<div class="library-roof">🏛️ The Great Library</div>', unsafe_allow_html=True)

    # Display floors from top (highest) to bottom
    for i in reversed(range(total_floors)):
        floor = st.session_state.library_floors[i]
        is_current = i == current_floor_index
        is_unlocked = collection_size >= floor["required_tags"]

        # Style based on floor status
        floor_class = "library-floor"
        if is_current:
            floor_class += " current"
        if not is_unlocked:
            floor_class += " locked"

        # Add special classes for highest floors
        # NOTE(review): thresholds 8/6 assume a 10-floor layout matching the
        # Impuritas Civitas / Star of the City tiers — confirm against LIBRARY_FLOORS.
        if i >= 8 and is_unlocked:  # Impuritas Civitas level floors
            floor_class += " impuritas"
        elif i >= 6 and is_unlocked:  # Star of the City level floors
            floor_class += " star"

        # One colored dot per rarity available on this floor.
        rarity_dots = ""
        for rarity in floor.get("rarities", []):
            color = RARITY_LEVELS[rarity]["color"]
            rarity_dots += f'<span class="rarity-dot" style="background-color:{color};"></span>'

        # Unlocked floors get a translucent tint from the floor's hex color.
        if is_unlocked:
            floor_style = f"background-color: rgba({int(floor['color'][1:3], 16)}, {int(floor['color'][3:5], 16)}, {int(floor['color'][5:7], 16)}, 0.25);"
        else:
            floor_style = ""

        # Special border animation for the top floor. Generalized from the
        # previous hardcoded `i == 9` so the effect tracks the actual floor
        # count (identical behavior for the standard 10-floor library).
        border_class = ""
        if i == total_floors - 1 and is_unlocked:
            border_class = "rainbow-border"

        # Display the floor
        floor_content = f"""
        <div class="{floor_class} {border_class}" style="{floor_style}">
            <span class="library-floor-number">{i+1}</span>
            <div class="library-floor-details">
                <p class="library-floor-name">{floor['name']}</p>
                <p class="library-floor-description">{floor['description'] if is_unlocked else 'Locked'}</p>
                <div class="library-floor-rarities">{rarity_dots}</div>
            </div>
            <div class="library-floor-status">
                {"🔓" if is_unlocked else "🔒"} {floor['required_tags']} tags
            </div>
        </div>
        """
        st.markdown(floor_content, unsafe_allow_html=True)

    # Entrance
    st.markdown('<div class="library-entrance">📚 Entrance</div>', unsafe_allow_html=True)

    # Floor details expander
    with st.expander("Floor Details", expanded=False):
        # Create a table with styled rarities for dark theme
        st.markdown("""
        <style>
        .floor-details-table {
            width: 100%;
            border-collapse: collapse;
            color: #ffffff;
        }

        .floor-details-table th {
            background-color: #333;
            padding: 8px;
            text-align: left;
            border: 1px solid #444;
        }

        .floor-details-table td {
            padding: 8px;
            border: 1px solid #444;
        }

        .floor-details-table tr:nth-child(even) {
            background-color: rgba(255,255,255,0.03);
        }

        .floor-details-table tr:nth-child(odd) {
            background-color: rgba(0,0,0,0.2);
        }

        .floor-details-table tr:hover {
            background-color: rgba(13, 110, 253, 0.1);
        }

        .current-floor {
            background-color: rgba(13, 110, 253, 0.15) !important;
        }
        </style>

        <table class="floor-details-table">
            <tr>
                <th>Floor</th>
                <th>Name</th>
                <th>Status</th>
                <th>Req. Tags</th>
                <th>Rarities</th>
                <th>Rarity Boost</th>
            </tr>
        """, unsafe_allow_html=True)

        # Add each floor to the table
        for i, floor in enumerate(st.session_state.library_floors):
            is_unlocked = collection_size >= floor["required_tags"]
            is_current = i == current_floor_index

            # Format rarities with colors
            rarity_text = ""
            for rarity in floor.get("rarities", []):
                color = RARITY_LEVELS[rarity]["color"]

                # Special styling based on rarity
                if rarity == "Impuritas Civitas":
                    rarity_text += f"<span style='animation: rainbow-text 4s linear infinite;'>{rarity}</span>, "
                elif rarity == "Star of the City":
                    rarity_text += f"<span style='color:{color}; text-shadow: 0 0 3px gold;'>{rarity}</span>, "
                elif rarity == "Urban Nightmare":
                    rarity_text += f"<span style='color:{color}; text-shadow: 0 0 1px #FF5722;'>{rarity}</span>, "
                elif rarity == "Urban Plague":
                    rarity_text += f"<span style='color:{color}; text-shadow: 0 0 1px #9C27B0;'>{rarity}</span>, "
                else:
                    rarity_text += f"<span style='color:{color};'>{rarity}</span>, "

            # Current floor class
            row_class = "current-floor" if is_current else ""

            # Add the floor row; rarity_text[:-2] strips the trailing ", ".
            st.markdown(f"""
            <tr class="{row_class}">
                <td>{i+1}</td>
                <td>{floor["name"]}</td>
                <td>{"🔓 Unlocked" if is_unlocked else "🔒 Locked"}</td>
                <td>{floor["required_tags"]}</td>
                <td>{rarity_text[:-2] if rarity_text else ""}</td>
                <td>+{int(floor.get('rarity_boost', 0) * 100)}%</td>
            </tr>
            """, unsafe_allow_html=True)

        # Close the table
        st.markdown("</table>", unsafe_allow_html=True)
|
1167 |
+
|
1168 |
+
def add_discovered_tag(tag, rarity, library_floor=None) -> bool:
    """
    Add a tag to the discovered tags with enriched metadata.

    Records a new entry in st.session_state.discovered_tags or, for a repeat
    discovery, bumps its counters. Also tracks which library tiers have been
    explored and persists the library state to storage on every call.

    Args:
        tag: The tag name
        rarity: The tag rarity level
        library_floor: The library floor where it was discovered (optional)

    Returns:
        bool: True if it's a new discovery, False if already discovered
    """
    is_new = tag not in st.session_state.discovered_tags

    # Get current time (human-readable; used for both first and latest sighting)
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S")

    # Get tag category if metadata is available; defaults to "unknown" when
    # the metadata dict is absent or has no entry for this tag.
    category = "unknown"
    if hasattr(st.session_state, 'tag_rarity_metadata') and st.session_state.tag_rarity_metadata:
        if tag in st.session_state.tag_rarity_metadata:
            tag_info = st.session_state.tag_rarity_metadata[tag]
            if isinstance(tag_info, dict) and "category" in tag_info:
                category = tag_info["category"]

    # Get or update tag info. (Note: tag_info is deliberately rebound below —
    # the metadata lookup above only feeds `category`.)
    if is_new:
        tag_info = {
            "rarity": rarity,
            "discovery_time": timestamp,
            "category": category,
            "discovery_count": 1,
            "last_seen": timestamp
        }

        # Floor is only recorded when the caller supplied one.
        if library_floor:
            tag_info["library_floor"] = library_floor

        st.session_state.discovered_tags[tag] = tag_info

        # Track exploration of library tiers (lazily create the set so older
        # saved sessions without this key still work).
        if 'explored_library_tiers' not in st.session_state:
            st.session_state.explored_library_tiers = set()

        if library_floor:
            st.session_state.explored_library_tiers.add(library_floor)
    else:
        # Update existing tag: bump the sighting counter and refresh last_seen.
        tag_info = st.session_state.discovered_tags[tag]
        tag_info["discovery_count"] = tag_info.get("discovery_count", 1) + 1
        tag_info["last_seen"] = timestamp

        # Only update library floor if provided
        if library_floor:
            tag_info["library_floor"] = library_floor

            # Track exploration
            if 'explored_library_tiers' not in st.session_state:
                st.session_state.explored_library_tiers = set()

            st.session_state.explored_library_tiers.add(library_floor)

    # Save state after updating (persisted on every discovery, new or repeat)
    tag_storage.save_library_state(session_state=st.session_state)

    return is_new
|
1234 |
+
|
1235 |
+
def calculate_upgrade_cost(upgrade_type, current_level):
    """
    Calculate the currency cost of the next level of a library upgrade.

    Cost scales linearly with the current level: base_cost * current_level * 1.5,
    truncated to an int.

    Args:
        upgrade_type: One of "speed", "capacity", or "rarity".
        current_level: The upgrade's current level (>= 1).

    Returns:
        int: The cost of upgrading from current_level to current_level + 1.

    Raises:
        KeyError: If upgrade_type is not a known upgrade.
    """
    base_cost = {
        "speed": 50,
        "capacity": 100,
        "rarity": 150
    }
    # Cost increases with each level
    return int(base_cost[upgrade_type] * (current_level * 1.5))
|
1243 |
+
|
1244 |
+
def purchase_library_upgrade(upgrade_type):
    """
    Purchase a library upgrade

    Deducts the upgrade cost from the player's tag currency, bumps the
    upgrade's level by one, records the spend in game stats, and persists
    both library and game state.

    Args:
        upgrade_type: The type of upgrade ("speed", "capacity", or "rarity")

    Returns:
        bool: True if purchased successfully, False otherwise
    """
    # Price the next level based on where this upgrade currently stands.
    level_now = st.session_state.library_upgrades.get(upgrade_type, 1)
    price = calculate_upgrade_cost(upgrade_type, level_now)

    # Bail out early when the player can't cover the price.
    if st.session_state.tag_currency < price:
        return False

    # Commit the purchase: spend the currency and raise the level.
    st.session_state.tag_currency -= price
    st.session_state.library_upgrades[upgrade_type] = level_now + 1

    # Fold the spend into the running game statistics, if they exist.
    if hasattr(st.session_state, 'game_stats'):
        stats = st.session_state.game_stats
        stats["currency_spent"] = stats.get("currency_spent", 0) + price

    # Persist both the library state and the overall game state.
    tag_storage.save_library_state(session_state=st.session_state)
    tag_storage.save_game(st.session_state)

    return True
|
1277 |
+
|
1278 |
+
def display_library_upgrades():
    """Display and manage upgrades for the library using Streamlit elements with enhanced visuals.

    Renders one card + purchase button per upgrade type (speed, capacity,
    rarity), each showing level, a progress bar toward level 10, the current
    and next-level effect, and the cost in tag currency.
    """
    st.subheader("Library Upgrades")

    # Add styling for upgrade cards with dark theme
    st.markdown("""
    <style>
    .upgrade-card {
        border: 1px solid #444;
        border-radius: 10px;
        padding: 15px;
        margin-bottom: 20px;
        background-color: #222;
        color: #ffffff;
        transition: transform 0.2s, box-shadow 0.2s;
    }

    .upgrade-card:hover {
        transform: translateY(-2px);
        box-shadow: 0 4px 8px rgba(0,0,0,0.3);
    }

    .upgrade-title {
        font-size: 1.2em;
        font-weight: bold;
        margin-bottom: 10px;
        color: #ffffff;
    }

    .upgrade-level {
        display: inline-block;
        background-color: #0d6efd;
        color: white;
        padding: 3px 8px;
        border-radius: 10px;
        font-size: 0.8em;
        margin-bottom: 10px;
    }

    .upgrade-stat {
        display: flex;
        align-items: center;
        margin-bottom: 5px;
    }

    .upgrade-stat-label {
        width: 100px;
        font-size: 0.9em;
        color: #adb5bd;
    }

    .upgrade-stat-value {
        font-weight: bold;
    }

    .upgrade-cost {
        margin-top: 10px;
        font-weight: bold;
        color: #6610f2;
    }

    .level-bar {
        height: 6px;
        background-color: #333;
        border-radius: 3px;
        margin-bottom: 10px;
        overflow: hidden;
    }

    .level-progress {
        height: 100%;
        background-color: #0d6efd;
        border-radius: 3px;
    }

    @keyframes pulse-button {
        0% { transform: scale(1); }
        50% { transform: scale(1.05); }
        100% { transform: scale(1); }
    }

    .pulse-button {
        animation: pulse-button 2s infinite;
    }
    </style>
    """, unsafe_allow_html=True)

    st.write("Improve your expeditions with these upgrades:")

    # Create columns for each upgrade type
    col1, col2, col3 = st.columns(3)

    # Get current upgrade levels
    upgrades = st.session_state.library_upgrades

    # Speed upgrade: reduces expedition cooldown (next level shown at 90%).
    with col1:
        _render_library_upgrade(
            upgrade_type="speed",
            label="Speed",
            title="⏱️ Speed Upgrade",
            level=upgrades.get("speed", 1),
            effect="Reduces cooldown time",
            current=format_time_remaining(calculate_expedition_duration()),
            next_value=format_time_remaining(calculate_expedition_duration() * 0.9),
        )

    # Capacity upgrade: one more tag discovered per expedition per level.
    with col2:
        _render_library_upgrade(
            upgrade_type="capacity",
            label="Capacity",
            title="🔍 Capacity Upgrade",
            level=upgrades.get("capacity", 1),
            effect="Increases tags discovered",
            current=f"{calculate_expedition_capacity()} tags",
            next_value=f"{calculate_expedition_capacity() + 1} tags",
        )

    # Rarity upgrade: +20% rare-tag chance per level above 1.
    with col3:
        rarity_level = upgrades.get("rarity", 1)
        _render_library_upgrade(
            upgrade_type="rarity",
            label="Rarity",
            title="💎 Rarity Upgrade",
            level=rarity_level,
            effect="Improves rare tag chance",
            current=f"+{(rarity_level - 1) * 20}% boost",
            next_value=f"+{rarity_level * 20}% boost",
        )

    # Add a styled info box about library growth with dark theme
    st.markdown("""
    <div style="background-color: rgba(13, 110, 253, 0.15);
                border-left: 4px solid #0d6efd;
                border-radius: 4px;
                padding: 15px;
                margin-top: 20px;
                color: #ffffff;">
        <h4 style="margin-top: 0; color: #6495ED;">📚 Library Growth</h4>
        <p style="margin-bottom: 0;">
            Your library will grow as you collect more tags. Each floor of the library unlocks new rarities and
            improves your chances of finding rare tags. Continue collecting tags to unlock deeper levels of the library!
        </p>
    </div>
    """, unsafe_allow_html=True)


def _render_library_upgrade(upgrade_type, label, title, level, effect, current, next_value):
    """Render one upgrade card plus its purchase button.

    Extracted from the three near-identical sections that previously lived
    inline in display_library_upgrades. Also fixes the success message typo
    ("Rapacity upgraded...") the rarity section used to show.

    Args:
        upgrade_type: Key into library_upgrades ("speed"/"capacity"/"rarity").
        label: Human-readable name used in the button and messages.
        title: Card header HTML text (includes emoji).
        level: The upgrade's current level.
        effect: One-line description of what the upgrade does.
        current: Display string for the current effect value.
        next_value: Display string for the effect at the next level.
    """
    cost = calculate_upgrade_cost(upgrade_type, level)

    # Progress bar is scaled against a nominal max level of 10.
    max_level = 10
    progress_percentage = min(100, (level / max_level) * 100)

    # Create an upgrade card with progress bar in dark theme
    st.markdown(f"""
    <div class="upgrade-card">
        <div class="upgrade-title">{title}</div>
        <div class="upgrade-level">Level {level}</div>
        <div class="level-bar">
            <div class="level-progress" style="width: {progress_percentage}%;"></div>
        </div>
        <div class="upgrade-stat">
            <span class="upgrade-stat-label">Effect:</span>
            <span class="upgrade-stat-value">{effect}</span>
        </div>
        <div class="upgrade-stat">
            <span class="upgrade-stat-label">Current:</span>
            <span class="upgrade-stat-value">{current}</span>
        </div>
        <div class="upgrade-stat">
            <span class="upgrade-stat-label">Next Level:</span>
            <span class="upgrade-stat-value">{next_value}</span>
        </div>
        <div class="upgrade-cost" style="color: #9D4EDD;">Cost: {cost} {TAG_CURRENCY_NAME}</div>
    </div>
    """, unsafe_allow_html=True)

    # Upgrade button — disabled when the player can't afford the cost.
    can_afford = st.session_state.tag_currency >= cost

    if st.button(f"Upgrade {label}", key=f"upgrade_{upgrade_type}", disabled=not can_afford, use_container_width=True):
        if purchase_library_upgrade(upgrade_type):
            st.success(f"{label} upgraded to level {level + 1}!")
            st.rerun()
        else:
            st.error(f"Not enough {TAG_CURRENCY_NAME}. Need {cost}.")
|
1520 |
+
|
1521 |
+
def initialize_library_system():
    """Initialize the library system state in session state if not already present.

    On first call per session: tries to load persisted library state via
    tag_storage; falls back to fresh defaults (empty discoveries, level-1
    upgrades, zero growth counters). On every call: ensures library_floors is
    set, refreshes "unknown" tag categories, and injects the rarity CSS
    animations (re-injected each rerun so styling survives Streamlit reruns).
    """
    if 'library_system_initialized' not in st.session_state:
        st.session_state.library_system_initialized = True

        # Try to load from storage first
        library_state = tag_storage.load_library_state(st.session_state)

        if library_state:
            # We already have the state loaded into session_state by the load function
            print("Library system loaded from storage.")
        else:
            # Initialize with defaults
            st.session_state.discovered_tags = {}  # {tag_name: {"rarity": str, "discovery_time": timestamp, "category": str}}
            st.session_state.library_exploration_history = []  # List of recent library explorations

            # Initialize enkephalin if not present
            if 'enkephalin' not in st.session_state:
                st.session_state.enkephalin = 0

            # For the library interface
            st.session_state.expedition_results = []  # Results from completed expeditions

            # Library growth system
            st.session_state.library_growth = {
                "total_discoveries": 0,
                "last_discovery_time": time.time()
            }

            # Upgrade system for library
            st.session_state.library_upgrades = {
                "speed": 1,     # Expedition speed (reduces cooldown time)
                "capacity": 1,  # Tags discovered per expedition
                "rarity": 1     # Rare tag chance
            }

            # Set of explored library tiers
            st.session_state.explored_library_tiers = set()

            print("Library system initialized with defaults.")

    # Store library floors in session state if not already there
    if 'library_floors' not in st.session_state:
        st.session_state.library_floors = LIBRARY_FLOORS

    # Update categories for any "unknown" category tags
    update_discovered_tag_categories()

    # Add CSS animations for styling (per-rarity text/border effects used by
    # the discovery and floor displays)
    st.markdown("""
    <style>
    /* Star of the City animation */
    @keyframes star-glow {
        0% { text-shadow: 0 0 5px #FFD700; }
        50% { text-shadow: 0 0 15px #FFD700; }
        100% { text-shadow: 0 0 5px #FFD700; }
    }

    /* Impuritas Civitas animation */
    @keyframes rainbow-text {
        0% { color: red; }
        14% { color: orange; }
        28% { color: yellow; }
        42% { color: green; }
        57% { color: blue; }
        71% { color: indigo; }
        85% { color: violet; }
        100% { color: red; }
    }

    @keyframes rainbow-border {
        0% { border-color: red; }
        14% { border-color: orange; }
        28% { border-color: yellow; }
        42% { border-color: green; }
        57% { border-color: blue; }
        71% { border-color: indigo; }
        85% { border-color: violet; }
        100% { border-color: red; }
    }

    /* Urban Nightmare animation */
    @keyframes nightmare-pulse {
        0% { border-color: #FF9800; }
        50% { border-color: #FF5722; }
        100% { border-color: #FF9800; }
    }

    /* Urban Plague subtle effect */
    .glow-purple {
        text-shadow: 0 0 3px #9C27B0;
    }

    /* Apply the animations to specific rarity classes */
    .star-of-city {
        animation: star-glow 2s infinite;
        font-weight: bold;
    }

    .impuritas-civitas {
        animation: rainbow-text 4s linear infinite;
        font-weight: bold;
    }

    .urban-nightmare {
        animation: nightmare-pulse 3s infinite;
        font-weight: bold;
    }

    .urban-plague {
        text-shadow: 0 0 3px #9C27B0;
        font-weight: bold;
    }
    </style>
    """, unsafe_allow_html=True)
|
1636 |
+
|
1637 |
+
def display_library_extraction():
    """Display the library exploration interface.

    Top-level entry for the library screen: initializes library state, shows
    the library banner, and lays out three tabs (exploration, discovered
    tags, building view). The last-viewed tab index is mirrored into
    st.session_state.library_tab_index.
    """
    initialize_library_system()

    st.title(f"Welcome to {LIBRARY_INFO['name']}")
    # Banner styled with the library's theme color.
    st.markdown(f"""
    <div style="background-color: rgba(74, 20, 140, 0.15);
                border-radius: 10px;
                padding: 15px;
                margin-bottom: 20px;
                border-left: 5px solid {LIBRARY_INFO['color']};
                color: #ffffff;">
        <p style="margin: 0;">{LIBRARY_INFO['description']}</p>
    </div>
    """, unsafe_allow_html=True)

    # Create tabs with enhanced styling for dark theme
    st.markdown("""
    <style>
    /* Custom styling for tabs */
    .stTabs [data-baseweb="tab-list"] {
        gap: 2px;
    }

    .stTabs [data-baseweb="tab"] {
        border-radius: 5px 5px 0 0;
        padding: 10px 16px;
        font-weight: 600;
    }
    </style>
    """, unsafe_allow_html=True)

    # Store current tab index in session state if not present
    if 'library_tab_index' not in st.session_state:
        st.session_state.library_tab_index = 0

    # Create expanded tabs - removed the essence tab
    explore_tab, discovered_tab, building_tab = st.tabs([
        "📚 Library Exploration",
        "🔍 Discovered Tags",
        "🏛️ Library Building"
    ])

    with explore_tab:
        st.session_state.library_tab_index = 0
        display_library_exploration_interface()

    with discovered_tab:
        st.session_state.library_tab_index = 1
        st.subheader("Your Discovered Tags")
        st.write("These are tags you've discovered through the library system. They differ from your collected tags, which are obtained from scanning images.")

        # Display discovered tags using our new function
        display_discovered_tags()

    with building_tab:
        st.session_state.library_tab_index = 2
        display_library_building()
|
1695 |
+
|
1696 |
+
def display_discovered_tags():
    """Display the user's discovered tags with the same visual style as the tag collection.

    Reads ``st.session_state.discovered_tags``, a mapping of tag name -> info dict
    (keys used here: "rarity", "category", "discovery_time"). Renders a rarity
    distribution summary, then the tags grouped by a user-selected sort key:
    category (rarest first within each), rarity, or discovery date (newest first).
    """
    # Nothing to show until the library system has recorded at least one tag.
    if not hasattr(st.session_state, 'discovered_tags') or not st.session_state.discovered_tags:
        st.info("Explore the library to discover new tags!")
        return

    unique_tags = len(st.session_state.discovered_tags)
    st.write(f"You have discovered {unique_tags} unique tags.")

    # Tally discovered tags per rarity level for the distribution summary.
    rarity_counts = {}
    for tag_info in st.session_state.discovered_tags.values():
        rarity = tag_info.get("rarity", "Unknown")
        if rarity not in rarity_counts:
            rarity_counts[rarity] = 0
        rarity_counts[rarity] += 1

    # Only display rarity categories that have tags
    active_rarities = {r: c for r, c in rarity_counts.items() if c > 0}

    # If there are active rarities to display
    if active_rarities:
        display_discovered_rarity_distribution(active_rarities)

    # Let the user choose how the tag list below is organized.
    sort_options = ["Category (rarest first)", "Rarity", "Discovery Time"]
    selected_sort = st.selectbox("Sort tags by:", sort_options, key="discovered_tags_sort")

    # Group tags by the selected method
    if selected_sort == "Category (rarest first)":
        # Group tags by category
        categories = {}
        for tag, info in st.session_state.discovered_tags.items():
            category = info.get("category", "unknown")
            if category not in categories:
                categories[category] = []
            categories[category].append((tag, info))

        # RARITY_LEVELS appears to be ordered commonest -> rarest (the other
        # branches iterate it reversed to show rarest first), so a higher
        # index means a rarer tag. Hoisted out of the loop: it is invariant.
        rarity_order = list(RARITY_LEVELS.keys())

        def get_rarity_index(tag_tuple):
            """Sort key: position of the tag's rarity in RARITY_LEVELS (unknown -> -1)."""
            _tag, info = tag_tuple
            rarity = info.get("rarity", "Unknown")
            if rarity in rarity_order:
                return rarity_order.index(rarity)
            return -1

        # Display tags by category in expanders
        for category, tags in sorted(categories.items()):
            # BUG FIX: the original key returned len(rarity_order) - index
            # (rarer -> smaller) AND sorted with reverse=True — a double
            # inversion that put the COMMONEST tags first despite the
            # "rarest first" label. Sorting descending on the raw index
            # puts the rarest tags first as advertised.
            sorted_tags = sorted(tags, key=get_rarity_index, reverse=True)

            # Check if category has any rare tags
            has_rare_tags = any(info.get("rarity") in ["Impuritas Civitas", "Star of the City"]
                                for _, info in sorted_tags)

            # Get category info (icon/color) if available
            category_display = category.capitalize()
            if category in TAG_CATEGORIES:
                category_info = TAG_CATEGORIES[category]
                category_icon = category_info.get("icon", "")
                category_color = category_info.get("color", "#888888")
                category_display = f"<span style='color:{category_color};'>{category_icon} {category.capitalize()}</span>"

            # Create header with information about rare tags if present
            header = f"{category_display} ({len(tags)} tags)"
            if has_rare_tags:
                header += " ✨ Contains rare tags!"

            # Display the header and expander (auto-expand categories holding rare tags)
            st.markdown(header, unsafe_allow_html=True)
            with st.expander("Show/Hide", expanded=has_rare_tags):
                # Group by rarity within category
                rarity_groups = {}
                for tag, info in sorted_tags:
                    rarity = info.get("rarity", "Unknown")
                    if rarity not in rarity_groups:
                        rarity_groups[rarity] = []
                    rarity_groups[rarity].append((tag, info))

                # Display each rarity group in order (rarest first)
                for rarity in reversed(rarity_order):
                    if rarity in rarity_groups:
                        tags_in_rarity = rarity_groups[rarity]
                        if tags_in_rarity:
                            color = RARITY_LEVELS[rarity]["color"]

                            # Special styling for the rarest tiers
                            if rarity == "Impuritas Civitas":
                                rarity_style = "animation:rainbow-text 4s linear infinite;font-weight:bold;"
                            elif rarity == "Star of the City":
                                rarity_style = f"color:{color};text-shadow:0 0 3px gold;font-weight:bold;"
                            elif rarity == "Urban Nightmare":
                                rarity_style = f"color:{color};text-shadow:0 0 1px #FF5722;font-weight:bold;"
                            else:
                                rarity_style = f"color:{color};font-weight:bold;"

                            st.markdown(f"<span style='{rarity_style}'>{rarity.capitalize()}</span> ({len(tags_in_rarity)} tags)", unsafe_allow_html=True)
                            display_discovered_tag_grid(tags_in_rarity)
                            st.markdown("---")

    elif selected_sort == "Rarity":
        # Group tags by rarity level
        rarity_groups = {}
        for tag, info in st.session_state.discovered_tags.items():
            rarity = info.get("rarity", "Unknown")
            if rarity not in rarity_groups:
                rarity_groups[rarity] = []
            rarity_groups[rarity].append((tag, info))

        # Get ordered rarities (rarest first)
        ordered_rarities = list(RARITY_LEVELS.keys())
        ordered_rarities.reverse()  # Reverse to show rarest first

        # Display tags by rarity
        for rarity in ordered_rarities:
            if rarity in rarity_groups:
                tags = rarity_groups[rarity]
                color = RARITY_LEVELS[rarity]["color"]

                # Add special styling for rare rarities
                rarity_html = f"<span style='color:{color};font-weight:bold;'>{rarity.capitalize()}</span>"
                if rarity == "Impuritas Civitas":
                    rarity_html = f"<span style='animation:rainbow-text 4s linear infinite;font-weight:bold;'>{rarity.capitalize()}</span>"
                elif rarity == "Star of the City":
                    rarity_html = f"<span style='color:{color};text-shadow:0 0 3px gold;font-weight:bold;'>{rarity.capitalize()}</span>"
                elif rarity == "Urban Nightmare":
                    rarity_html = f"<span style='color:{color};text-shadow:0 0 1px #FF5722;font-weight:bold;'>{rarity.capitalize()}</span>"

                # First create the title with HTML, then use it in the expander
                # (st.expander labels cannot render HTML themselves)
                st.markdown(f"### {rarity_html} ({len(tags)} tags)", unsafe_allow_html=True)
                with st.expander("Show/Hide", expanded=rarity in ["Impuritas Civitas", "Star of the City"]):
                    # Group by category within rarity
                    category_groups = {}
                    for tag, info in tags:
                        category = info.get("category", "unknown")
                        if category not in category_groups:
                            category_groups[category] = []
                        category_groups[category].append((tag, info))

                    # Display each category within this rarity level
                    for category, category_tags in sorted(category_groups.items()):
                        # Get category info (icon/color) if available
                        category_display = category.capitalize()
                        if category in TAG_CATEGORIES:
                            category_info = TAG_CATEGORIES[category]
                            category_icon = category_info.get("icon", "")
                            category_color = category_info.get("color", "#888888")
                            category_display = f"<span style='color:{category_color};'>{category_icon} {category.capitalize()}</span>"

                        st.markdown(f"#### {category_display} ({len(category_tags)} tags)", unsafe_allow_html=True)
                        display_discovered_tag_grid(category_tags)
                        st.markdown("---")

    elif selected_sort == "Discovery Time":
        # Sort all tags by discovery time (newest first)
        sorted_tags = []
        for tag, info in st.session_state.discovered_tags.items():
            discovery_time = info.get("discovery_time", "")
            sorted_tags.append((tag, info, discovery_time))

        sorted_tags.sort(key=lambda x: x[2], reverse=True)  # Sort by time, newest first

        # Group by date (dict preserves insertion order, so newest date comes first)
        date_groups = {}
        for tag, info, time_str in sorted_tags:
            # Extract just the date part if timestamp has date and time
            date = time_str.split()[0] if " " in time_str else time_str

            if date not in date_groups:
                date_groups[date] = []
            date_groups[date].append((tag, info))

        # Hoisted: the most recent date is the first key; computing it once
        # avoids rebuilding the key list on every loop iteration.
        most_recent_date = next(iter(date_groups), None)

        # Display tags grouped by discovery date
        for date, tags in date_groups.items():
            date_display = date if date else "Unknown date"
            st.markdown(f"### Discovered on {date_display} ({len(tags)} tags)")

            with st.expander("Show/Hide", expanded=date == most_recent_date):  # Expand most recent by default
                display_discovered_tag_grid(tags)
            st.markdown("---")
def display_discovered_rarity_distribution(active_rarities):
    """Show how many discovered tags fall under each rarity, one column apiece,
    with animated styling for the rarest tiers.

    ``active_rarities`` maps rarity name -> count; every entry gets a column.
    """
    # Inject the keyframe animations backing the per-rarity CSS classes below.
    st.markdown("""
    <style>
    @keyframes grid-glow {
        0% { text-shadow: 0 0 2px gold; }
        50% { text-shadow: 0 0 6px gold; }
        100% { text-shadow: 0 0 2px gold; }
    }

    @keyframes grid-rainbow {
        0% { color: red; }
        14% { color: orange; }
        28% { color: yellow; }
        42% { color: green; }
        57% { color: blue; }
        71% { color: indigo; }
        85% { color: violet; }
        100% { color: red; }
    }

    @keyframes grid-pulse {
        0% { opacity: 0.8; }
        50% { opacity: 1; }
        100% { opacity: 0.8; }
    }

    .grid-star {
        text-shadow: 0 0 3px gold;
        animation: grid-glow 2s infinite;
    }

    .grid-impuritas {
        animation: grid-rainbow 4s linear infinite;
    }

    .grid-nightmare {
        text-shadow: 0 0 1px #FF5722;
        animation: grid-pulse 3s infinite;
    }

    .grid-plague {
        text-shadow: 0 0 1px #9C27B0;
    }
    </style>
    """, unsafe_allow_html=True)

    # The rarest tiers get a dedicated CSS class; all other rarities fall
    # back to a plain inline color style.
    special_classes = {
        "Impuritas Civitas": "grid-impuritas",
        "Star of the City": "grid-star",
        "Urban Nightmare": "grid-nightmare",
        "Urban Plague": "grid-plague",
    }

    columns = st.columns(len(active_rarities))
    for column, (rarity, count) in zip(columns, active_rarities.items()):
        with column:
            # Color with fallback for rarities missing from RARITY_LEVELS.
            color = RARITY_LEVELS.get(rarity, {}).get("color", "#888888")
            class_name = special_classes.get(rarity, "")

            if class_name:
                st.markdown(
                    f"<div style='text-align:center;'><span class='{class_name}' style='font-weight:bold;'>{rarity.capitalize()}</span><br>{count}</div>",
                    unsafe_allow_html=True
                )
            else:
                style = f"color:{color};font-weight:bold;"
                st.markdown(
                    f"<div style='text-align:center;'><span style='{style}'>{rarity.capitalize()}</span><br>{count}</div>",
                    unsafe_allow_html=True
                )
def display_discovered_tag_grid(tags):
    """Lay discovered tags out three to a row, each with a rarity badge,
    an optional dataset sample count, and its discovery details.

    ``tags`` is an iterable of ``(tag_name, info_dict)`` pairs; they are
    rendered in sorted order.
    """
    columns = st.columns(3)

    for idx, (tag, info) in enumerate(sorted(tags)):
        with columns[idx % 3]:
            rarity = info.get("rarity", "Unknown")
            library_floor = info.get("library_floor", "")
            discovery_count = info.get("discovery_count", 1)

            badge_color = RARITY_LEVELS.get(rarity, {}).get("color", "#888888")

            # Look up how many dataset samples carry this tag, when the
            # rarity metadata is loaded in session state.
            sample_count = None
            metadata = getattr(st.session_state, 'tag_rarity_metadata', None)
            if metadata and tag in metadata:
                entry = metadata[tag]
                if isinstance(entry, dict) and "sample_count" in entry:
                    sample_count = entry["sample_count"]

            # Abbreviate large sample counts (1.2M / 3.4K style).
            sample_display = ""
            if sample_count is not None:
                if sample_count >= 1000000:
                    sample_display = f"<span style='font-size:0.8em;color:#666;'>({sample_count/1000000:.1f}M)</span>"
                elif sample_count >= 1000:
                    sample_display = f"<span style='font-size:0.8em;color:#666;'>({sample_count/1000:.1f}K)</span>"
                else:
                    sample_display = f"<span style='font-size:0.8em;color:#666;'>({sample_count})</span>"

            # The rarest tiers get animated / glowing tag text.
            if rarity == "Impuritas Civitas":
                tag_html = f"<span style='animation: rainbow-text 4s linear infinite;'>{tag}</span>"
            elif rarity == "Star of the City":
                tag_html = f"<span style='text-shadow: 0 0 3px gold;'>{tag}</span>"
            elif rarity == "Urban Nightmare":
                tag_html = f"<span style='text-shadow: 0 0 1px #FF9800;'>{tag}</span>"
            else:
                tag_html = tag

            # Tag name + colored rarity badge + optional sample count.
            st.markdown(
                f"{tag_html} <span style='background-color:{badge_color};color:white;padding:2px 6px;border-radius:10px;font-size:0.8em;'>{rarity.capitalize()}</span> {sample_display}",
                unsafe_allow_html=True
            )

            # Where the tag was found, when recorded.
            if library_floor:
                st.markdown(f"<span style='font-size:0.85em;'>Found in: {library_floor}</span>", unsafe_allow_html=True)

            # How many times it has been rediscovered.
            if discovery_count > 1:
                st.markdown(f"<span style='font-size:0.85em;'>Seen {discovery_count} times</span>", unsafe_allow_html=True)
game/mosaics/templates/1st_costume_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/animal_crossing_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/arknights_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/azur_lane_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/blue_archive_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/boku_no_hero_academia_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/casual_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/chainsaw_man_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/character_extended_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/company_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/cosplay_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/disgaea_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/disney_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/dragon_ball_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/dungeon_and_fighter_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/elsword_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/emblem_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/ensemble_stars!_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/fate_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/ff14_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/fire_emblem_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/flower_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/food_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/genshin_impact_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/girls'_frontline_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/girls_und_panzer_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/granblue_fantasy_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/honkai_impact_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/honkai_star_rail_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/housamo_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/idolmaster_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/jojo_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/kancolle_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/kemono_friends_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/kirby_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/league_of_legends_template.png
ADDED
![]() |
Git LFS Details
|