Upload 11 files

- Docker +33 -0
- README.md +2 -9
- app.py +627 -0
- config.py +11 -0
- main.py +138 -0
- output_writer.py +40 -0
- priority_logic.py +85 -0
- processor.py +277 -0
- requirements.txt +4 -0
- sheet_reader.py +9 -0
- utils.py +30 -0
Docker
ADDED
@@ -0,0 +1,33 @@
# Base image with Python
FROM python:3.12

# Install system dependencies if needed
# (apt-get must run as root, so this comes before switching to the app user)
RUN apt-get update && apt-get install -y \
    build-essential \
    python3-dev \
    && rm -rf /var/lib/apt/lists/*

# Create a user to avoid running as root
RUN useradd -m -u 1000 user
USER user

# Set environment variables
ENV PATH="/home/user/.local/bin:$PATH"

# Set working directory
WORKDIR /app

# Copy requirements
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy app files
COPY . .

# Expose Hugging Face Spaces port
EXPOSE 7860

# Run the Gradio app
CMD ["python", "app.py"]
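
A typical local smoke test of this image might look like the following (the tag name is illustrative):

docker build -t dyeing-priority .
docker run --rm -p 7860:7860 dyeing-priority

Note that for the mapped port to be reachable, the Gradio server inside the container must bind to 0.0.0.0:7860; the commented-out server_name/server_port arguments at the bottom of app.py would need to be enabled for the Docker path.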
README.md
CHANGED
@@ -1,13 +1,6 @@
 ---
-title:
-
-colorFrom: green
-colorTo: green
+title: Dyeing_Urgency_Priority_App_
+app_file: gradio_priority_app.py
 sdk: gradio
 sdk_version: 5.43.1
-app_file: app.py
-pinned: false
-short_description: Boosts efficiency of carpet manufacturing
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,627 @@
import gradio as gr
import pandas as pd
import numpy as np
import os
from datetime import datetime
import tempfile
from collections import defaultdict

# Required columns for dyeing priority calculation
REQUIRED_COLS = [
    "Account",
    "Order #",
    "DESIGN",
    "Labels",
    "Colours",
    "Kgs",
    "Pending"
]

# Additional columns that might be present
OPTIONAL_COLS = ["Sqm", "Unnamed: 0"]

def _normalize_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Normalize column names by stripping whitespace"""
    df = df.copy()
    df.columns = [str(c).strip() for c in df.columns]
    return df

def _parse_colours(colour_str):
    """Parse a colour string into a list of individual colours"""
    if pd.isna(colour_str):
        return []

    colour_str = str(colour_str).strip()

    # Try the common separators (comma, semicolon, pipe, slash, plus, ampersand)
    for sep in [',', ';', '|', '/', '+', '&']:
        if sep in colour_str:
            return [c.strip().upper() for c in colour_str.split(sep) if c.strip()]

    # No separator found: treat as a single colour
    return [colour_str.upper()] if colour_str else []

def calculate_colour_totals(df: pd.DataFrame) -> tuple:
    """Calculate the total quantity required for each colour across all designs.

    Returns a (summary DataFrame, per-colour details dict) tuple.
    """
    colour_totals = defaultdict(float)
    colour_details = defaultdict(list)  # Track which designs use each colour

    for _, row in df.iterrows():
        colours = _parse_colours(row['Colours'])
        kgs = pd.to_numeric(row['Kgs'], errors='coerce')
        if pd.isna(kgs):  # explicit check: `nan or 0` is truthy and would keep NaN
            kgs = 0
        design = str(row.get('DESIGN', 'Unknown'))
        order_num = str(row.get('Order #', 'Unknown'))

        if colours and kgs > 0:
            # Distribute the weight equally among colours if there are several
            kgs_per_colour = kgs / len(colours)
            for colour in colours:
                colour_totals[colour] += kgs_per_colour
                colour_details[colour].append({
                    'Design': design,
                    'Order': order_num,
                    'Kgs_Contribution': kgs_per_colour,
                    'Total_Order_Kgs': kgs
                })

    # Convert to a DataFrame with the detailed breakdown, highest total first
    colour_rows = []
    for colour, total_kgs in sorted(colour_totals.items(), key=lambda x: x[1], reverse=True):
        designs_using = list({detail['Design'] for detail in colour_details[colour]})
        orders_count = len(colour_details[colour])

        colour_rows.append({
            'Colour': colour,
            'Total_Kgs_Required': round(total_kgs, 2),
            'Designs_Using_This_Colour': ', '.join(sorted(designs_using)),
            'Number_of_Orders': orders_count,
            'Priority_Rank': len(colour_rows) + 1
        })

    colour_df = pd.DataFrame(colour_rows)
    return colour_df, colour_details

def create_detailed_colour_breakdown(colour_details: dict) -> pd.DataFrame:
    """Create a detailed breakdown showing which orders contribute to each colour"""
    breakdown_rows = []

    for colour, details in colour_details.items():
        for detail in details:
            breakdown_rows.append({
                'Colour': colour,
                'Design': detail['Design'],
                'Order_Number': detail['Order'],
                'Kgs_for_This_Colour': round(detail['Kgs_Contribution'], 2),
                'Total_Order_Kgs': detail['Total_Order_Kgs']
            })

    breakdown_df = pd.DataFrame(breakdown_rows)
    # Sort by colour, then by kgs contribution (descending); guard the empty case
    if not breakdown_df.empty:
        breakdown_df = breakdown_df.sort_values(['Colour', 'Kgs_for_This_Colour'], ascending=[True, False])

    return breakdown_df

def detect_date_columns(df: pd.DataFrame) -> list:
    """Detect date columns in the dataframe"""
    date_columns = []

    for col in df.columns:
        col_str = str(col).strip()

        # Try to parse the header as a datetime
        try:
            pd.to_datetime(col_str)
            date_columns.append(col)
        except Exception:
            # Fall back to day/month patterns like "13/8", "14/8"
            parts = col_str.split('/')
            if len(parts) == 2 and all(part.isdigit() for part in parts):
                date_columns.append(col)

    return date_columns

def find_earliest_order_date(df: pd.DataFrame) -> pd.Series:
    """Find the earliest date for each order from the detected date columns"""
    date_columns = detect_date_columns(df)

    if not date_columns:
        # No date columns found: treat every order as a year old (high priority).
        # Returning a date (not the integer 365) keeps the caller's
        # `today - OrderDate` arithmetic valid.
        year_ago = pd.Timestamp.now().normalize() - pd.Timedelta(days=365)
        return pd.Series([year_ago] * len(df), index=df.index)

    earliest_dates = []

    for idx, row in df.iterrows():
        order_dates = []

        for date_col in date_columns:
            cell_value = row[date_col]

            # Skip cells that are empty or hold no real data (NaN, "", 0, "0")
            if pd.isna(cell_value) or str(cell_value).strip() in ("", "0"):
                continue

            # Parse the date from the column header
            try:
                if '/' in str(date_col):
                    # Formats like "13/8" (day/month); assume the current year
                    day, month = str(date_col).split('/')
                    date_obj = pd.to_datetime(f"2025-{month.zfill(2)}-{day.zfill(2)}")
                else:
                    # Datetime column headers
                    date_obj = pd.to_datetime(str(date_col))

                # The cell holds data (checked above), so this date counts
                order_dates.append(date_obj)

            except Exception:
                continue

        # Find the earliest date for this order
        if order_dates:
            earliest_date = min(order_dates)
        else:
            # No valid dates found: assign a default old date
            earliest_date = pd.to_datetime("2024-01-01")

        earliest_dates.append(earliest_date)

    return pd.Series(earliest_dates, index=df.index)

def compute_dyeing_priority(df: pd.DataFrame, min_kgs: int = 100, weights: dict = None) -> tuple:
    """
    Compute dyeing priority from three signals:
    1. Order age (older orders score higher)
    2. Colour simplicity (fewer colours score higher)
    3. Design volume (larger total kgs score higher)
    Designs below the minimum-kgs threshold are ranked after those above it.
    """

    # Default weights if not provided
    if weights is None:
        weights = {"AGE_WEIGHT": 50, "COLOUR_SIMPLICITY_WEIGHT": 30, "DESIGN_WEIGHT": 20}

    df = _normalize_columns(df)

    # Check for required columns (a Date column is optional)
    missing = [c for c in REQUIRED_COLS if c not in df.columns]
    if missing:
        raise ValueError(f"Missing required columns: {missing}. Found columns: {list(df.columns)}")

    # Create working copy
    out = df.copy()

    # Find the earliest order date from the date columns
    out["OrderDate"] = find_earliest_order_date(out)

    # Calculate age in days
    today = pd.Timestamp.now().normalize()
    out["OrderAgeDays"] = (today - out["OrderDate"]).dt.days
    out["OrderAgeDays"] = out["OrderAgeDays"].fillna(0).clip(lower=0)

    # Convert Kgs to numeric
    out["Kgs"] = pd.to_numeric(out["Kgs"], errors="coerce").fillna(0)

    # Parse colours and count them
    out["ColourList"] = out["Colours"].apply(_parse_colours)
    out["ColourCount"] = out["ColourList"].apply(len)

    # Group by design to calculate design-level metrics
    design_groups = out.groupby("DESIGN").agg({
        "Kgs": "sum",
        "OrderDate": "min",      # Oldest date for this design
        "OrderAgeDays": "max",   # Maximum age for this design
        "ColourCount": "first",  # Colour count should be the same for the same design
        "Order #": "count"       # Number of orders for this design
    }).reset_index()

    design_groups.columns = ["DESIGN", "Total_Kgs", "Oldest_Date", "Max_Age_Days", "ColourCount", "Order_Count"]

    # Flag designs that meet the minimum-kg requirement
    design_groups["MeetsMinKgs"] = design_groups["Total_Kgs"] >= min_kgs

    # Calculate scores for designs that meet the criteria
    eligible_designs = design_groups[design_groups["MeetsMinKgs"]].copy()

    if len(eligible_designs) == 0:
        # If no designs meet the criteria, include all for ranking
        eligible_designs = design_groups.copy()
        eligible_designs["MeetsMinKgs"] = False

    # Age score (0-1, older = higher)
    if eligible_designs["Max_Age_Days"].max() > 0:
        eligible_designs["AgeScore_01"] = eligible_designs["Max_Age_Days"] / eligible_designs["Max_Age_Days"].max()
    else:
        eligible_designs["AgeScore_01"] = 0

    # Colour simplicity score (0-1, fewer colours = higher)
    if eligible_designs["ColourCount"].max() > 0:
        eligible_designs["ColourSimplicityScore_01"] = 1 - (eligible_designs["ColourCount"] / eligible_designs["ColourCount"].max())
    else:
        eligible_designs["ColourSimplicityScore_01"] = 0

    # Design volume score (0-1, more kgs = higher priority for production efficiency)
    if eligible_designs["Total_Kgs"].max() > 0:
        eligible_designs["VolumeScore_01"] = eligible_designs["Total_Kgs"] / eligible_designs["Total_Kgs"].max()
    else:
        eligible_designs["VolumeScore_01"] = 0

    # Calculate weighted priority scores
    w_age = weights["AGE_WEIGHT"] / 100.0
    w_colour = weights["COLOUR_SIMPLICITY_WEIGHT"] / 100.0
    w_design = weights["DESIGN_WEIGHT"] / 100.0

    eligible_designs["AgeScore"] = eligible_designs["AgeScore_01"] * w_age
    eligible_designs["ColourSimplicityScore"] = eligible_designs["ColourSimplicityScore_01"] * w_colour
    eligible_designs["VolumeScore"] = eligible_designs["VolumeScore_01"] * w_design

    eligible_designs["PriorityScore"] = (
        eligible_designs["AgeScore"] +
        eligible_designs["ColourSimplicityScore"] +
        eligible_designs["VolumeScore"]
    )

    # Sort by priority
    eligible_designs = eligible_designs.sort_values(
        ["MeetsMinKgs", "PriorityScore", "Max_Age_Days"],
        ascending=[False, False, False]
    )

    # Join back to the original data to get the detailed view
    detailed_results = out.merge(
        eligible_designs[["DESIGN", "Total_Kgs", "Max_Age_Days", "MeetsMinKgs",
                          "AgeScore", "ColourSimplicityScore", "VolumeScore", "PriorityScore"]],
        on="DESIGN",
        how="left"
    )

    # Sort detailed results by priority
    detailed_results = detailed_results.sort_values(
        ["MeetsMinKgs", "PriorityScore", "OrderAgeDays"],
        ascending=[False, False, False]
    )

    # Calculate colour totals with the detailed breakdown
    colour_totals, colour_details = calculate_colour_totals(out)
    colour_breakdown = create_detailed_colour_breakdown(colour_details)

    return detailed_results, eligible_designs, colour_totals, colour_breakdown

def save_dyeing_results(detailed_df, design_summary, colour_totals, colour_breakdown, output_path, min_kgs, weights):
    """Save all results to a single workbook with multiple sheets"""

    with pd.ExcelWriter(output_path, engine='openpyxl') as writer:

        # Sheet 1: Colour Requirements Summary (the main output)
        colour_totals.to_excel(writer, sheet_name='COLOUR_REQUIREMENTS', index=False)

        # Sheet 2: Detailed Colour Breakdown (which orders contribute to each colour)
        colour_breakdown.to_excel(writer, sheet_name='Colour_Order_Breakdown', index=False)

        # Sheet 3: Design Summary (design-level priority ranking)
        design_summary.to_excel(writer, sheet_name='Design_Priority_Summary', index=False)

        # Sheet 4: Detailed Order Priority
        detailed_df.to_excel(writer, sheet_name='Order_Priority_Detail', index=False)

        # Sheet 5: Instructions
        instructions_data = [
            ['🎨 DYEING PRIORITY & COLOUR REQUIREMENTS ANALYSIS'],
            [''],
            ['📋 SHEET EXPLANATIONS:'],
            [''],
            ['1. COLOUR_REQUIREMENTS - 🎯 MAIN OUTPUT YOU NEED'],
            ['   • Total kgs needed for each colour (consolidated across all designs)'],
            ['   • No colour repetition - each colour listed once with total quantity'],
            ['   • Sorted by quantity (highest first) for production planning'],
            ['   • Shows which designs use each colour and order count'],
            [''],
            ['2. Colour_Order_Breakdown - Detailed breakdown'],
            ['   • Shows exactly which orders contribute to each colour total'],
            ['   • Useful for tracking and verification'],
            [''],
            ['3. Design_Priority_Summary - Design-level priorities'],
            ['   • Ranked by priority score for production sequence'],
            [''],
            ['4. Order_Priority_Detail - Individual order details'],
            ['   • All orders with calculated priority scores'],
            [''],
            ['🎯 PRIORITY METHODOLOGY:'],
            [f'• Age Weight: {weights["AGE_WEIGHT"]}% - Prioritizes older orders'],
            [f'• Colour Simplicity Weight: {weights["COLOUR_SIMPLICITY_WEIGHT"]}% - Fewer colours = higher priority'],
            [f'• Design Volume Weight: {weights["DESIGN_WEIGHT"]}% - Larger quantities get priority'],
            [f'• Minimum Kgs Threshold: {min_kgs} - Only designs with total kgs >= this value are prioritized'],
            [''],
            ['🎨 COLOUR CONSOLIDATION LOGIC:'],
            ['• If RED is used in Design-A (100kg) and Design-B (50kg)'],
            ['• Output shows: RED = 150kg total (no repetition)'],
            ['• Helps plan exact dye batch quantities needed'],
            ['• Multi-colour orders split proportionally (e.g., "Red,Blue" 100kg = 50kg each)'],
            [''],
            ['📊 USAGE RECOMMENDATIONS:'],
            ['• Use COLOUR_REQUIREMENTS sheet for dye purchasing/batching'],
            ['• Use Design_Priority_Summary for production sequence planning'],
            ['• Check Colour_Order_Breakdown for detailed verification'],
            [''],
            [f'Generated on: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}']
        ]

        instructions_df = pd.DataFrame(instructions_data, columns=['Instructions'])
        instructions_df.to_excel(writer, sheet_name='Instructions', index=False)

# Gradio interface functions
def load_excel(file):
    """Load an Excel file and return the available sheet names"""
    if file is None:
        return gr.Dropdown(choices=[]), "Please upload a file first."

    try:
        # gr.File(type="filepath") passes a plain path string, not a file object
        xls = pd.ExcelFile(file)
        return gr.Dropdown(choices=xls.sheet_names, value=xls.sheet_names[0]), "✅ File loaded successfully!"
    except Exception as e:
        return gr.Dropdown(choices=[]), f"❌ Error loading file: {str(e)}"

def validate_weights(age_weight, colour_weight, design_weight):
    """Validate that the weights sum to 100%"""
    total = age_weight + colour_weight + design_weight
    if total == 100:
        return "✅ Weights are valid (sum = 100%)"
    else:
        return f"⚠️ Weights sum to {total}%. Please adjust to equal 100%."

def preview_dyeing_data(file, sheet_name):
    """Preview the selected sheet data for dyeing analysis"""
    if file is None or not sheet_name:
        return "Please upload a file and select a sheet first.", pd.DataFrame()

    try:
        df = pd.read_excel(file, sheet_name=sheet_name)

        # Show basic info
        preview_info = f"📊 **Sheet: {sheet_name}**\n"
        preview_info += f"- Rows: {len(df)}\n"
        preview_info += f"- Columns: {len(df.columns)}\n\n"

        # Check for required columns
        df_norm = df.copy()
        df_norm.columns = [str(c).strip() for c in df_norm.columns]
        missing = [c for c in REQUIRED_COLS if c not in df_norm.columns]

        if missing:
            preview_info += f"❌ **Missing required columns:** {missing}\n\n"
        else:
            preview_info += "✅ **All required columns found!**\n\n"

        # Detect date columns
        date_columns = detect_date_columns(df_norm)
        if date_columns:
            preview_info += f"📅 **Date columns detected:** {len(date_columns)} columns\n"
            preview_info += f"   Sample dates: {date_columns[:5]}\n\n"
        else:
            preview_info += "⚠️ **No date columns detected** - will use default prioritization\n\n"

        # Show some statistics
        if 'Kgs' in df_norm.columns:
            total_kgs = pd.to_numeric(df_norm['Kgs'], errors='coerce').sum()
            preview_info += f"**Total Kgs:** {total_kgs:,.1f}\n"

        if 'DESIGN' in df_norm.columns:
            unique_designs = df_norm['DESIGN'].nunique()
            preview_info += f"**Unique Designs:** {unique_designs}\n"

        preview_info += "\n**Available columns:**\n"
        for i, col in enumerate(df.columns, 1):
            marker = "📅" if col in date_columns else ""
            preview_info += f"{i}. {col} {marker}\n"

        # Show the first few rows
        preview_df = df.head(5)

        return preview_info, preview_df

    except Exception as e:
        return f"❌ Error previewing data: {str(e)}", pd.DataFrame()

def process_dyeing_priority(file, sheet_name, age_weight, colour_weight, design_weight, min_kgs):
    """Main processing function for dyeing priorities"""

    if file is None:
        return None, None, None, "❌ Please upload a file first."

    if not sheet_name:
        return None, None, None, "❌ Please select a sheet."

    # Validate weights
    total_weight = age_weight + colour_weight + design_weight
    if total_weight != 100:
        return None, None, None, f"❌ Error: Total weight must equal 100% (currently {total_weight}%)"

    try:
        # Load data
        df = pd.read_excel(file, sheet_name=sheet_name)

        if df.empty:
            return None, None, None, "❌ The selected sheet is empty."

        # Prepare weights
        weights = {
            "AGE_WEIGHT": age_weight,
            "COLOUR_SIMPLICITY_WEIGHT": colour_weight,
            "DESIGN_WEIGHT": design_weight
        }

        # Compute priorities
        detailed_results, design_summary, colour_totals, colour_breakdown = compute_dyeing_priority(
            df, min_kgs=min_kgs, weights=weights
        )

        # Create temporary output file
        output_path = tempfile.NamedTemporaryFile(delete=False, suffix='.xlsx').name
        save_dyeing_results(detailed_results, design_summary, colour_totals, colour_breakdown, output_path, min_kgs, weights)

        # Create success message
        total_designs = len(design_summary)
        eligible_designs = sum(design_summary['MeetsMinKgs'])
        total_colours = len(colour_totals)
        top_colours = colour_totals.head(3)['Colour'].tolist() if len(colour_totals) > 0 else []

        success_msg = "✅ Dyeing Priority Analysis Complete!\n"
        success_msg += "📊 SUMMARY:\n"
        success_msg += f"- Total Designs Analyzed: {total_designs}\n"
        success_msg += f"- Designs Meeting {min_kgs}kg Threshold: {eligible_designs}\n"
        success_msg += f"- Unique Colours Required: {total_colours}\n"
        if top_colours:
            success_msg += f"- Top 3 Colours by Volume: {', '.join(top_colours)}\n"
        success_msg += f"- Highest Priority Score: {design_summary['PriorityScore'].max():.3f}\n\n"
        success_msg += "🎨 COLOUR REQUIREMENTS sheet contains consolidated totals!\n"
        success_msg += "📥 Download complete analysis below"

        return output_path, design_summary.head(10), colour_totals.head(15), success_msg

    except Exception as e:
        return None, None, None, f"❌ Error processing data: {str(e)}"

# Create Gradio interface
def create_dyeing_interface():
    with gr.Blocks(title="Dyeing Urgency Priority Calculator", theme=gr.themes.Soft()) as demo:

        gr.Markdown("""
        # 🎨 Dyeing Urgency Priority Calculator

        Upload your Excel file with dyeing/textile manufacturing data to calculate production priorities based on:
        - **Order Age**: Prioritize older orders first (detects dates from column headers)
        - **Colour Simplicity**: Fewer colours = easier production
        - **Design Volume**: Larger quantities for efficiency

        **Expected Columns**: Account, Order #, DESIGN, Labels, Colours, Kgs, Pending
        **Date Detection**: Automatically detects date columns (like 2025-01-08, 13/8, etc.)
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("## 📁 File Upload & Selection")

                file_input = gr.File(
                    label="Upload Excel File",
                    file_types=[".xlsx", ".xls"],
                    type="filepath"
                )

                sheet_dropdown = gr.Dropdown(
                    label="Select Sheet",
                    choices=[],
                    interactive=True
                )

                file_status = gr.Textbox(label="File Status", interactive=False)

            with gr.Column(scale=1):
                gr.Markdown("## ⚖️ Priority Weights (must sum to 100%)")

                age_weight = gr.Slider(
                    minimum=0, maximum=100, value=50, step=1,
                    label="Age Weight (%)",
                    info="Higher = prioritize older orders more"
                )

                colour_weight = gr.Slider(
                    minimum=0, maximum=100, value=30, step=1,
                    label="Colour Simplicity Weight (%)",
                    info="Higher = prioritize designs with fewer colours"
                )

                design_weight = gr.Slider(
                    minimum=0, maximum=100, value=20, step=1,
                    label="Design Volume Weight (%)",
                    info="Higher = prioritize larger quantity designs"
                )

                weight_status = gr.Textbox(label="Weight Validation", interactive=False)

                min_kgs = gr.Number(
                    label="Minimum Kgs Threshold per Design",
                    value=100,
                    info="Only designs with total kgs >= this value get priority"
                )

        with gr.Row():
            preview_btn = gr.Button("👁️ Preview Data", variant="secondary")
            process_btn = gr.Button("🎨 Calculate Dyeing Priorities", variant="primary", size="lg")

        with gr.Row():
            with gr.Column():
                gr.Markdown("## 📊 Data Preview")
                preview_info = gr.Textbox(label="Data Information", lines=10, interactive=False)
                preview_table = gr.Dataframe(label="Sample Data")

        with gr.Row():
            with gr.Column():
                gr.Markdown("## 🏆 Priority Results")
                results_info = gr.Textbox(label="Processing Status", interactive=False)

            with gr.Column():
                download_file = gr.File(label="📥 Download Complete Analysis")

        with gr.Row():
            with gr.Column():
                gr.Markdown("## 📋 Top Design Priorities")
                design_results = gr.Dataframe(label="Design Priority Summary")

            with gr.Column():
                gr.Markdown("## 🎨 Colour Requirements (Consolidated)")
                colour_results = gr.Dataframe(
                    label="Total Kgs Required Per Colour",
                    headers=["Colour", "Total Kgs", "Used in Designs", "Orders Count"],
                    interactive=False
                )

        # Event handlers
        file_input.change(
            fn=load_excel,
            inputs=[file_input],
            outputs=[sheet_dropdown, file_status]
        )

        for weight_input in [age_weight, colour_weight, design_weight]:
            weight_input.change(
                fn=validate_weights,
                inputs=[age_weight, colour_weight, design_weight],
                outputs=[weight_status]
            )

        preview_btn.click(
            fn=preview_dyeing_data,
            inputs=[file_input, sheet_dropdown],
            outputs=[preview_info, preview_table]
        )

        process_btn.click(
            fn=process_dyeing_priority,
            inputs=[file_input, sheet_dropdown, age_weight, colour_weight, design_weight, min_kgs],
            outputs=[download_file, design_results, colour_results, results_info]
        )

        # Initialize weight validation
        demo.load(
            fn=validate_weights,
            inputs=[age_weight, colour_weight, design_weight],
            outputs=[weight_status]
        )

    return demo

# Launch the app
if __name__ == "__main__":
    demo = create_dyeing_interface()
    demo.launch(
        #server_name="0.0.0.0",
        #server_port=7860,
        share=True,
        debug=True
    )
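
Outside the UI, the scoring pipeline can be exercised directly. A minimal sketch on invented order data (two rows, column names matching REQUIRED_COLS above):

import pandas as pd

toy = pd.DataFrame({
    "Account": ["A1", "A2"],
    "Order #": ["O-1", "O-2"],
    "DESIGN": ["ZEBRA", "LOTUS"],
    "Labels": ["", ""],
    "Colours": ["RED,BLUE", "RED"],
    "Kgs": [100, 50],
    "Pending": [1, 1],
})

detailed, designs, colour_totals, breakdown = compute_dyeing_priority(toy, min_kgs=50)
# "RED,BLUE" at 100kg splits 50/50 across its colours, so the consolidated
# totals come out as RED = 50 + 50 = 100kg and BLUE = 50kg.
print(colour_totals[["Colour", "Total_Kgs_Required"]])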
config.py
ADDED
@@ -0,0 +1,11 @@
# config.py
# User-adjustable weights for the priority scoring (must sum to 100)
WEIGHTS = {
    "AGE_WEIGHT": 60,        # % weight for Order Age
    "COMPONENT_WEIGHT": 30,  # % weight for Simplicity (fewer components)
    "MANUAL_WEIGHT": 10      # % weight for Manual override
}

# Validate
if sum(WEIGHTS.values()) != 100:
    raise ValueError("The weights in config.py must sum to 100.")
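
These percentages become 0-to-1 multipliers in priority_logic.py, so a quick sanity check of the arithmetic (the score inputs here are invented for illustration):

w_age, w_comp, w_man = 60 / 100, 30 / 100, 10 / 100
# A product at maximum age (1.0), middling simplicity (0.5), no manual flag (0.0):
score = 1.0 * w_age + 0.5 * w_comp + 0.0 * w_man   # 0.60 + 0.15 + 0.00 = 0.75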
main.py
ADDED
@@ -0,0 +1,138 @@
import os
from processor import ManufacturingProcessor, get_file_preview
from utils import prompt_weights

def main():
    print("🏭 Manufacturing Priority Decision Helper")

    # Get file path
    file_path = input("Enter the full path to your Excel file: ").strip()
    if not os.path.exists(file_path):
        print("❌ File not found.")
        return

    # Initialize processor
    try:
        processor = ManufacturingProcessor()
        file_info = processor.get_file_info(file_path)
    except Exception as e:
        print(f"❌ Unable to read Excel file: {e}")
        return

    # Show available sheets
    print(f"\n📑 Available sheets in '{file_info['file_name']}':")
    for i, sheet_name in enumerate(file_info['sheets'], start=1):
        print(f"  {i}. {sheet_name}")

    # Sheet selection
    while True:
        try:
            idx = int(input("Select a sheet by number: "))
            if 1 <= idx <= len(file_info['sheets']):
                selected_sheet = file_info['sheets'][idx - 1]
                break
        except ValueError:
            pass
        print("⚠️ Invalid selection, try again.")

    print(f"✅ Selected sheet: {selected_sheet}")

    # Preview data and validate
    print("\n🔍 Analyzing data...")
    try:
        preview = get_file_preview(file_path, selected_sheet)
        validation = preview['validation']

        print("📊 Data Summary:")
        print(f"  - Rows: {validation['row_count']}")
        print(f"  - Available columns: {len(validation['available_columns'])}")

        if not validation['valid']:
            print("\n❌ Data validation failed:")
            print(f"  Missing required columns: {validation['missing_columns']}")
            return

        if validation['data_issues']:
            print("\n⚠️ Data quality issues found:")
            for issue in validation['data_issues']:
                print(f"  - {issue}")

            continue_anyway = input("\nContinue processing anyway? (y/N): ").strip().lower()
            if continue_anyway != 'y':
                return

        print("✅ Data validation passed!")

    except Exception as e:
        print(f"❌ Error analyzing data: {e}")
        return

    # Weight adjustment (optional)
    print(f"\n⚖️ Current weights: Age={processor.weights['AGE_WEIGHT']}%, "
          f"Component={processor.weights['COMPONENT_WEIGHT']}%, "
          f"Manual={processor.weights['MANUAL_WEIGHT']}%")

    adjust_weights = input("Would you like to adjust weights? (y/N): ").strip().lower()
    if adjust_weights == 'y':
        try:
            new_weights = prompt_weights(processor.weights.copy())
            processor.weights = new_weights
            print(f"✅ Updated weights: {new_weights}")
        except Exception as e:
            print(f"⚠️ Error setting weights, using defaults: {e}")

    # Quantity threshold
    try:
        min_qty_input = input("Enter minimum quantity threshold for FIFO (default 50): ").strip()
        min_qty = int(min_qty_input) if min_qty_input else 50
    except ValueError:
        min_qty = 50
        print("⚠️ Invalid input, using default threshold of 50")

    # Process the data
    print(f"\n🔄 Processing data with minimum quantity threshold: {min_qty}")
    try:
        processed_df, processing_info = processor.process_file(
            file_path, selected_sheet, min_qty
        )

        print("✅ Priority calculation completed!")
        print("📈 Results summary:")
        print(f"  - Total products: {processing_info['total_products']}")
        print(f"  - Products above threshold: {processing_info['products_above_threshold']}")
        print(f"  - Highest priority score: {processing_info['highest_priority_score']:.4f}")

    except Exception as e:
        print(f"❌ Error processing data: {e}")
        return

    # Show a preview of the results
    print("\n🏆 Top 10 Priority Results:")
    display_cols = [c for c in ["Name of Product", "Components Used", "Quantity of Each Component",
                                "Oldest Product Required First", "Priority Assigned",
                                "OrderAgeDays", "ComponentCount", "QtyThresholdOK", "PriorityScore"]
                    if c in processed_df.columns]

    print(processed_df[display_cols].head(10).to_string(index=False, max_colwidth=20))

    # Save results
    base_name = os.path.splitext(os.path.basename(file_path))[0]
    output_dir = os.path.dirname(file_path)
    output_path = os.path.join(output_dir, f"{base_name}_PRIORITY.xlsx")

    try:
        final_output = processor.save_results(processed_df, output_path, processing_info)
        print(f"\n💾 Results saved to: {final_output}")
        print("\n📋 Output includes:")
        print("  - Priority_Results: Ranked manufacturing data")
        print("  - Instructions: Methodology and column explanations")
        print("  - Processing_Log: Detailed processing information")

    except Exception as e:
        print(f"❌ Failed to save results: {e}")
        return

    print("\n🎉 Processing complete! Check the output file for detailed results.")

if __name__ == "__main__":
    main()
output_writer.py
ADDED
@@ -0,0 +1,40 @@
# output_writer.py
import pandas as pd
from datetime import datetime

def save_with_instructions(df: pd.DataFrame, output_path: str, min_qty: int = 50, weights: dict = None):
    """
    Save the priority results to Excel with an instructions sheet
    """
    if weights is None:
        weights = {"AGE_WEIGHT": 60, "COMPONENT_WEIGHT": 30, "MANUAL_WEIGHT": 10}

    with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
        # Save main results
        df.to_excel(writer, sheet_name='Priority_Results', index=False)

        # Create instructions sheet
        instructions_data = [
            ['Manufacturing Priority Decision Results'],
            [''],
            ['METHODOLOGY:'],
            [f'Age Weight: {weights["AGE_WEIGHT"]}%'],
            [f'Component Simplicity Weight: {weights["COMPONENT_WEIGHT"]}%'],
            [f'Manual Priority Weight: {weights["MANUAL_WEIGHT"]}%'],
            [f'Minimum Quantity Threshold: {min_qty}'],
            [''],
            ['COLUMNS EXPLANATION:'],
            ['OrderAgeDays: Days since oldest product required'],
            ['ComponentCount: Number of unique components needed'],
            ['QtyThresholdOK: Whether quantity meets minimum threshold'],
            ['AgeScore: Weighted age contribution to priority'],
            ['SimplicityScore: Weighted simplicity contribution to priority'],
            ['ManualScore: Weighted manual priority contribution'],
            ['PriorityScore: Final calculated priority (0-1 scale)'],
            [''],
            ['Higher PriorityScore = Higher Manufacturing Priority'],
            [f'Generated on: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}']
        ]

        instructions_df = pd.DataFrame(instructions_data, columns=['Instructions'])
        instructions_df.to_excel(writer, sheet_name='Instructions', index=False)
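
A minimal sketch of driving save_with_instructions on its own (the toy frame and file name are invented for illustration):

import pandas as pd
from output_writer import save_with_instructions

toy = pd.DataFrame({"Name of Product": ["Rug-A"], "PriorityScore": [0.75]})
save_with_instructions(toy, "toy_PRIORITY.xlsx", min_qty=50)
# The workbook gains two sheets: Priority_Results (the frame as-is) and
# Instructions (methodology text with the weights and threshold used).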
priority_logic.py
ADDED
@@ -0,0 +1,85 @@
import pandas as pd
import numpy as np
from config import WEIGHTS

REQUIRED_COLS = [
    "Name of Product",
    "Components Used",
    "Quantity of Each Component",
    "Oldest Product Required First",
    "Priority Assigned",
]

def _normalize_columns(df: pd.DataFrame) -> pd.DataFrame:
    df = df.copy()
    df.columns = [str(c).strip() for c in df.columns]
    return df

def _component_count(series: pd.Series) -> pd.Series:
    def _count(x):
        if pd.isna(x):
            return 0
        parts = [p.strip() for p in str(x).split(",") if str(p).strip()]
        return len(set(parts)) if parts else 0
    return series.apply(_count)

def compute_priority(df: pd.DataFrame, min_qty: int = 50, weights: dict = None) -> pd.DataFrame:
    # Accept per-call weights (processor.py passes them); fall back to config defaults
    if weights is None:
        weights = WEIGHTS

    df = _normalize_columns(df)

    missing = [c for c in REQUIRED_COLS if c not in df.columns]
    if missing:
        raise ValueError(f"Missing required columns: {missing}")

    out = df.copy()
    out["Oldest Product Required First"] = pd.to_datetime(out["Oldest Product Required First"], errors="coerce")

    today = pd.Timestamp.now().normalize()
    out["OrderAgeDays"] = (today - out["Oldest Product Required First"]).dt.days
    out["OrderAgeDays"] = out["OrderAgeDays"].fillna(0).clip(lower=0)

    # Age score (0-1, older = higher)
    if out["OrderAgeDays"].max() > 0:
        age_score_01 = out["OrderAgeDays"] / out["OrderAgeDays"].max()
    else:
        age_score_01 = 0

    # FIFO rule: products below the quantity threshold forfeit their age score
    qty = pd.to_numeric(out["Quantity of Each Component"], errors="coerce").fillna(0)
    age_score_01 = np.where(qty >= min_qty, age_score_01, 0)

    # Simplicity score (0-1, fewer components = higher)
    comp_count = _component_count(out["Components Used"])
    if comp_count.max() > 0:
        comp_simplicity_01 = 1 - (comp_count / comp_count.max())
    else:
        comp_simplicity_01 = 0

    def manual_to01(x):
        if pd.isna(x):
            return 0.0
        s = str(x).strip().lower()
        if s in {"high", "urgent", "yes", "y", "true"}:
            return 1.0
        try:
            return 1.0 if float(s) > 0 else 0.0
        except ValueError:
            return 0.0

    manual_01 = out["Priority Assigned"].apply(manual_to01)

    w_age = weights["AGE_WEIGHT"] / 100.0
    w_comp = weights["COMPONENT_WEIGHT"] / 100.0
    w_man = weights["MANUAL_WEIGHT"] / 100.0

    out["AgeScore"] = age_score_01 * w_age
    out["SimplicityScore"] = comp_simplicity_01 * w_comp
    out["ManualScore"] = manual_01 * w_man

    out["PriorityScore"] = out["AgeScore"] + out["SimplicityScore"] + out["ManualScore"]

    out["ComponentCount"] = comp_count
    out["QtyThresholdOK"] = qty >= min_qty

    out = out.sort_values(["PriorityScore", "OrderAgeDays"], ascending=[False, False])
    return out
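
A minimal sketch of the scoring on synthetic data (the two rows are invented for illustration):

import pandas as pd
from priority_logic import compute_priority

toy = pd.DataFrame({
    "Name of Product": ["Rug-A", "Rug-B"],
    "Components Used": ["wool,latex", "wool"],
    "Quantity of Each Component": [120, 30],
    "Oldest Product Required First": ["2025-01-01", "2025-06-01"],
    "Priority Assigned": ["", "high"],
})

ranked = compute_priority(toy, min_qty=50)
# Rug-A is older and above the 50-unit threshold, so it keeps its age score;
# Rug-B is below the threshold (age score zeroed) but earns the simplicity
# score (one component) and the manual "high" flag.
print(ranked[["Name of Product", "PriorityScore"]])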
processor.py
ADDED
@@ -0,0 +1,277 @@
# processor.py
"""
Main processing orchestrator that ties together all the manufacturing priority logic.
This module provides high-level functions that both the CLI and Gradio interfaces can use.
"""

import os
import pandas as pd
from typing import Dict, Optional, Tuple
from datetime import datetime

from config import WEIGHTS
from sheet_reader import list_sheets, read_sheet
from priority_logic import compute_priority
from output_writer import save_with_instructions


class ManufacturingProcessor:
    """
    Main processor class for manufacturing priority calculations.
    Encapsulates all the logic needed to process Excel files and generate priority rankings.
    """

    def __init__(self, weights: Optional[Dict[str, int]] = None):
        """Initialize processor with weights"""
        self.weights = weights or WEIGHTS.copy()
        self.validate_weights()

    def validate_weights(self) -> None:
        """Ensure weights sum to 100"""
        total = sum(self.weights.values())
        if total != 100:
            raise ValueError(f"Weights must sum to 100, got {total}")

    def get_file_info(self, file_path: str) -> Dict:
        """Get information about the Excel file"""
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        try:
            sheets = list_sheets(file_path)
            file_size = os.path.getsize(file_path)

            return {
                "file_path": file_path,
                "file_name": os.path.basename(file_path),
                "file_size": file_size,
                "sheets": sheets,
                "sheet_count": len(sheets)
            }
        except Exception as e:
            raise RuntimeError(f"Error reading file info: {e}")

    def validate_sheet_data(self, df: pd.DataFrame) -> Dict:
        """Validate that the sheet has the required columns and usable data"""
        from priority_logic import REQUIRED_COLS

        # Normalize column names
        df_norm = df.copy()
        df_norm.columns = [str(c).strip() for c in df_norm.columns]

        # Check required columns
        missing_cols = [col for col in REQUIRED_COLS if col not in df_norm.columns]

        # Basic data validation
        validation_result = {
            "valid": len(missing_cols) == 0,
            "missing_columns": missing_cols,
            "available_columns": list(df.columns),
            "row_count": len(df),
            "empty_rows": df.isnull().all(axis=1).sum(),
            "data_issues": []
        }

        if validation_result["valid"]:
            # Check for data quality issues
            try:
                # Check date column
                date_col = "Oldest Product Required First"
                date_issues = pd.to_datetime(df_norm[date_col], errors='coerce').isnull().sum()
                if date_issues > 0:
                    validation_result["data_issues"].append(f"{date_issues} invalid dates in '{date_col}'")

                # Check quantity column
                qty_col = "Quantity of Each Component"
                qty_numeric = pd.to_numeric(df_norm[qty_col], errors='coerce')
                qty_issues = qty_numeric.isnull().sum()
                if qty_issues > 0:
                    validation_result["data_issues"].append(f"{qty_issues} non-numeric values in '{qty_col}'")

                # Check for completely empty required columns
                for col in REQUIRED_COLS:
                    if col in df_norm.columns:
                        empty_count = df_norm[col].isnull().sum()
                        if empty_count == len(df_norm):
                            validation_result["data_issues"].append(f"Column '{col}' is completely empty")

            except Exception as e:
                validation_result["data_issues"].append(f"Data validation error: {e}")

        return validation_result

    def process_file(self,
                     file_path: str,
                     sheet_name: str,
                     min_qty: int = 50,
                     custom_weights: Dict[str, int] = None) -> Tuple[pd.DataFrame, Dict]:
        """
        Process a single sheet from an Excel file and return prioritized results.

        Returns:
            Tuple of (processed_dataframe, processing_info)
        """

        # Use custom weights if provided, falling back to the instance weights
        if custom_weights:
            temp_weights = custom_weights.copy()
            if sum(temp_weights.values()) != 100:
                raise ValueError("Custom weights must sum to 100")
        else:
            temp_weights = self.weights

        # Read the data
        df = read_sheet(file_path, sheet_name)
        if df is None or df.empty:
            raise ValueError("Sheet is empty or could not be read")

        # Validate data
        validation = self.validate_sheet_data(df)
        if not validation["valid"]:
            raise ValueError(f"Data validation failed: Missing columns {validation['missing_columns']}")

        # Run the priority calculation
        try:
            processed_df = compute_priority(df, min_qty=min_qty, weights=temp_weights)
        except Exception as e:
            raise RuntimeError(f"Priority calculation failed: {e}")

        # Generate processing info
        processing_info = {
            "timestamp": datetime.now().isoformat(),
            "file_name": os.path.basename(file_path),
            "sheet_name": sheet_name,
            "weights_used": temp_weights,
            "min_quantity": min_qty,
            "total_products": len(df),
            "products_above_threshold": sum(processed_df["QtyThresholdOK"]),
            "highest_priority_score": processed_df["PriorityScore"].max(),
            "lowest_priority_score": processed_df["PriorityScore"].min(),
            "validation_info": validation
        }

        return processed_df, processing_info

    def save_results(self,
                     processed_df: pd.DataFrame,
                     output_path: str,
                     processing_info: Dict) -> str:
        """Save processed results with full documentation"""

        try:
            save_with_instructions(
                processed_df,
                output_path,
                min_qty=processing_info["min_quantity"],
                weights=processing_info["weights_used"]
            )

            # Add processing log sheet
            self._add_processing_log(output_path, processing_info)

            return output_path

        except Exception as e:
            raise RuntimeError(f"Failed to save results: {e}")

    def _add_processing_log(self, output_path: str, processing_info: Dict):
        """Add a processing log sheet to the output file"""
        try:
            # Open the existing workbook in append mode and add the log sheet
            with pd.ExcelWriter(output_path, mode='a', engine='openpyxl', if_sheet_exists='replace') as writer:
                # Rows are kept uniformly two-wide to match the (Parameter, Value) columns
                log_data = []
                log_data.append(["PROCESSING LOG", ""])
                log_data.append(["", ""])
                log_data.append(["Processing Timestamp", processing_info["timestamp"]])
                log_data.append(["Source File", processing_info["file_name"]])
                log_data.append(["Sheet Processed", processing_info["sheet_name"]])
                log_data.append(["", ""])
                log_data.append(["SETTINGS USED", ""])
                log_data.append(["Age Weight", f"{processing_info['weights_used']['AGE_WEIGHT']}%"])
                log_data.append(["Component Weight", f"{processing_info['weights_used']['COMPONENT_WEIGHT']}%"])
                log_data.append(["Manual Weight", f"{processing_info['weights_used']['MANUAL_WEIGHT']}%"])
                log_data.append(["Minimum Quantity", processing_info["min_quantity"]])
                log_data.append(["", ""])
                log_data.append(["RESULTS SUMMARY", ""])
                log_data.append(["Total Products", processing_info["total_products"]])
                log_data.append(["Above Threshold", processing_info["products_above_threshold"]])
                log_data.append(["Highest Priority Score", f"{processing_info['highest_priority_score']:.4f}"])
                log_data.append(["Lowest Priority Score", f"{processing_info['lowest_priority_score']:.4f}"])

                if processing_info["validation_info"]["data_issues"]:
                    log_data.append(["", ""])
                    log_data.append(["DATA ISSUES FOUND", ""])
                    for issue in processing_info["validation_info"]["data_issues"]:
                        log_data.append(["", issue])

                log_df = pd.DataFrame(log_data, columns=["Parameter", "Value"])
                log_df.to_excel(writer, sheet_name='Processing_Log', index=False)

        except Exception as e:
            # If adding the log fails, don't fail the whole operation
            print(f"Warning: Could not add processing log: {e}")


# Convenience functions for easy import
def quick_process(file_path: str,
                  sheet_name: str,
                  output_path: str = None,
                  min_qty: int = 50,
                  weights: Optional[Dict[str, int]] = None) -> str:
    """
    Quick processing function that handles the full workflow.

    Args:
        file_path: Path to Excel file
        sheet_name: Name of sheet to process
        output_path: Where to save results (optional, auto-generated if not provided)
        min_qty: Minimum quantity threshold
        weights: Custom weights dict (optional)

    Returns:
        Path to generated output file
    """
    processor = ManufacturingProcessor(weights)

    # Process the data
    processed_df, processing_info = processor.process_file(
        file_path, sheet_name, min_qty, weights
    )

    # Generate output path if not provided
    if output_path is None:
        base_name = os.path.splitext(os.path.basename(file_path))[0]
        output_dir = os.path.dirname(file_path)
        output_path = os.path.join(output_dir, f"{base_name}_PRIORITY.xlsx")

    # Save results
    return processor.save_results(processed_df, output_path, processing_info)


def get_file_preview(file_path: str, sheet_name: str, max_rows: int = 5) -> Dict:
    """
    Get a preview of the file data for validation purposes.

    Returns:
        Dict containing preview info and sample data
    """
    processor = ManufacturingProcessor()

    # Get file info
    file_info = processor.get_file_info(file_path)

    # Read sample data
    df = read_sheet(file_path, sheet_name)
    sample_df = df.head(max_rows) if df is not None else pd.DataFrame()

    # Validate data
    validation = processor.validate_sheet_data(df) if df is not None else {"valid": False}

    return {
        "file_info": file_info,
        "sample_data": sample_df,
        "validation": validation,
        "preview_rows": len(sample_df)
    }
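
Assuming an orders workbook on disk (the file and sheet names here are illustrative), the whole pipeline collapses to:

from processor import quick_process

out_path = quick_process("orders.xlsx", "Sheet1", min_qty=50)
print(f"Saved: {out_path}")   # writes orders_PRIORITY.xlsx next to the input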
requirements.txt
ADDED
@@ -0,0 +1,4 @@
gradio>=4.0.0
pandas>=1.5.0
openpyxl>=3.0.0
numpy>=1.20.0
sheet_reader.py
ADDED
@@ -0,0 +1,9 @@
# sheet_reader.py
import pandas as pd

def list_sheets(file_path: str):
    xls = pd.ExcelFile(file_path)
    return xls.sheet_names

def read_sheet(file_path: str, sheet_name: str) -> pd.DataFrame:
    return pd.read_excel(file_path, sheet_name=sheet_name)
utils.py
ADDED
@@ -0,0 +1,30 @@
# utils.py
def prompt_weights(default_weights: dict) -> dict:
    """
    Interactively collect weights from the user (in %) and validate that they sum to 100.
    Press Enter to keep defaults.
    """
    print("\n⚖️ Set Weights (press Enter to keep default values)")
    new_w = {}
    for key, val in default_weights.items():
        try:
            raw = input(f"  {key.replace('_WEIGHT', '').title()} (%), default {val}: ").strip()
            new_w[key] = val if raw == "" else int(raw)
        except ValueError:
            print(f"  Invalid input for {key}, keeping default {val}.")
            new_w[key] = val

    total = sum(new_w.values())
    if total != 100:
        print(f"  Weights sum to {total}, normalizing to 100 proportionally.")
        factor = 100.0 / total if total else 0
        for k in new_w:
            new_w[k] = int(round(new_w[k] * factor))
        # Ensure an exact 100 by adjusting the largest key if rounding drifts
        drift = 100 - sum(new_w.values())
        if drift != 0:
            largest_key = max(new_w.keys(), key=new_w.get)
            new_w[largest_key] += drift

    print("  Final Weights:", new_w)
    return new_w
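
To make the proportional rescaling concrete (the entered values are invented):

# Hypothetical session: the user enters 50 / 30 / 30, which sums to 110.
raw = {"AGE_WEIGHT": 50, "COMPONENT_WEIGHT": 30, "MANUAL_WEIGHT": 30}
factor = 100.0 / 110
scaled = {k: int(round(v * factor)) for k, v in raw.items()}  # 45 / 27 / 27, sum 99
drift = 100 - sum(scaled.values())                            # +1 left over from rounding
# prompt_weights adds the drift to the largest key: AGE_WEIGHT 45 -> 46, final 46/27/27.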