Pragya Jatav committed
Commit · 803ac82
1 Parent(s): a582e50
m1
Browse files
- Model_Result_Overview.py +134 -12
- Streamlit_functions.py +117 -30
- __pycache__/Streamlit_functions.cpython-310.pyc +0 -0
- __pycache__/classes.cpython-310.pyc +0 -0
- __pycache__/response_curves_model_quality.cpython-310.pyc +0 -0
- __pycache__/response_curves_model_quality_base.cpython-310.pyc +0 -0
- __pycache__/utilities.cpython-310.pyc +0 -0
- __pycache__/utilities_with_panel.cpython-310.pyc +0 -0
- classes.py +93 -72
- pages/1_Model_Quality.py +131 -2
- pages/2_Scenario_Planner.py +279 -70
- pages/3_Saved_Scenarios.py +134 -11
- response_curves_model_quality.py +20 -19
- response_curves_model_quality_base.py +10 -8
- summary_df.pkl +1 -1
- utilities.py +13 -13
- utilities_with_panel.py +15 -15
Model_Result_Overview.py
CHANGED
@@ -24,18 +24,123 @@ import yaml
 from yaml import SafeLoader
 import time
 from datetime import datetime,timedelta
-
+from pptx import Presentation
+from pptx.util import Inches
+from io import BytesIO
+import plotly.io as pio
+import response_curves_model_quality as rc1
 st.set_page_config(layout='wide')
 load_local_css('styles.css')
 set_header()
 
 st.title("Model Result Overview")
+def add_plotly_chart_to_slide(slide, fig, left, top, width, height):
+    img_stream = BytesIO()
+    pio.write_image(fig, img_stream, format='png',engine="orca")
+    slide.shapes.add_picture(img_stream, left, top, width, height)
+
+
+
+def save_table(df,prs):
+    # Add a blank slide
+    slide = prs.slides.add_slide(prs.slide_layouts[6])
+
+    rows, cols = df.shape[0] + 1, df.shape[1]  # +1 for the header row
+    table = slide.shapes.add_table(rows, cols, Inches(1), Inches(1), Inches(10), Inches(7)).table
+
+    # Set the header row
+    for col_idx, col_name in enumerate(df.columns):
+        table.cell(0, col_idx).text = col_name
+
+    # Add the DataFrame rows to the table
+    for row_idx, row in df.iterrows():
+        for col_idx, value in enumerate(row):
+            # # print(value)
+            if isinstance(value, int):
+                table.cell(row_idx + 1, col_idx).text = str(value)
+
+
+def save_ppt_file(fig1,fig2,fig3,fig4,fig6,fig7,figw,start_date,end_date,shares_df1,shares_df2):
+    # Initialize PowerPoint presentation
+    prs = Presentation()
+
+    # save_table(shares_df1,prs)
+    # save_table(shares_df2,prs)
+    # Slide 1: Model Quality with Chart
+    slide_1 = prs.slides.add_slide(prs.slide_layouts[6])
+    # title_1 = slide_1.shapes.title
+    # title_1.text = "Distribution Of Spends And Prospects"
+    # Add the Plotly chart to the slide
+    add_plotly_chart_to_slide(slide_1, sf.pie_contributions(start_date,end_date), Inches(0.25), Inches(0.25), width=Inches(9.25), height=Inches(6.75))
+    add_plotly_chart_to_slide(prs.slides.add_slide(prs.slide_layouts[6]), sf.pie_spend(start_date,end_date), Inches(0.25), Inches(0.25), width=Inches(9.25), height=Inches(6.75))
+    # Slide 2: Media Data Elasticity
+    slide_2 = prs.slides.add_slide(prs.slide_layouts[6])
+    # title_2 = slide_2.shapes.title
+    # title_2.text = "Media Contribution"
+    add_plotly_chart_to_slide(slide_2, fig2, Inches(0.25), Inches(0.25), width=Inches(9.25), height=Inches(6.75))
+    slide_3 = prs.slides.add_slide(prs.slide_layouts[6])
+    # title_3 = slide_3.shapes.title
+    # title_3.text = "Media Spends"
+    add_plotly_chart_to_slide(slide_3, fig3, Inches(0.25), Inches(0.25), width=Inches(9.25), height=Inches(6.75))
+    slide_4 = prs.slides.add_slide(prs.slide_layouts[6])
+    # title_4 = slide_4.shapes.title
+    # title_4.text = "CPP Distribution"
+    add_plotly_chart_to_slide(slide_4, fig4, Inches(0.25), Inches(0.25), width=Inches(9.25), height=Inches(6.75))
+
+    if figw != None:
+        slide_5 = prs.slides.add_slide(prs.slide_layouts[6])
+        # title_5 = slide_5.shapes.title
+        # title_5.text = "Change in MMM Estimated Prospect Contributions"
+        figw.update_layout(
+            # title="Distribution Of Spends"
+            title={
+                'text': "Change In MMM Estimated Prospect Contribution",
+                'font': {
+                    'size': 24,
+                    'family': 'Arial',
+                    'color': 'black',
+                    # 'bold': True
+                }
+            }
+
+        )
+        add_plotly_chart_to_slide(slide_5, figw, Inches(0.25), Inches(0.25), width=Inches(9.25), height=Inches(6.75))
+    else :
+        slide_5 = prs.slides.add_slide(prs.slide_layouts[5])
+        title_5 = slide_5.shapes.title
+        title_5.text = "Change in MMM Estimated Prospect Contributions"
+
+    slide_6 = prs.slides.add_slide(prs.slide_layouts[6])
+    # title_6 = slide_6.shapes.title
+    # title_6.text = "Base Decomposition"
+    add_plotly_chart_to_slide(slide_6, fig6, Inches(0.25), Inches(0.25), width=Inches(9.25), height=Inches(6.75))
+
+    slide_7 = prs.slides.add_slide(prs.slide_layouts[6])
+    # title_7 = slide_7.shapes.title
+    # title_7.text = "Media Decomposition"
+    add_plotly_chart_to_slide(slide_7, fig7, Inches(0.25), Inches(0.25), width=Inches(9.25), height=Inches(6.75))
+
+
+
+
+
+
+    # prs.save('MMM_Model_Result Overview.pptx')
+
+    # print("PowerPoint slides created successfully.")
+
+    # Save to a BytesIO object
+    ppt_stream = BytesIO()
+    prs.save(ppt_stream)
+    ppt_stream.seek(0)
+
+    return ppt_stream.getvalue()
 
 def get_random_effects(media_data, panel_col, mdf):
     random_eff_df = pd.DataFrame(columns=[panel_col, "random_effect"])
 
     for i, market in enumerate(media_data[panel_col].unique()):
-        # print(i, end='\r')
+        # # print(i, end='\r')
         intercept = mdf.random_effects[market].values[0]
         random_eff_df.loc[i, 'random_effect'] = intercept
         random_eff_df.loc[i, panel_col] = market
@@ -130,12 +235,14 @@ if auth_status:
     # fig = sf.pie_contributions(start_date,end_date)
     # st.plotly_chart(fig,use_container_width=True)
     # st.header("Distribution of Spends and Contributions")
-
-    st.plotly_chart(
+    fig1 = sf.pie_charts(start_date,end_date)
+    st.plotly_chart(fig1,use_container_width=True)
 
     ## Channel Contribution Bar Chart
-
-    st.plotly_chart(
+    fig2 =sf.channel_contribution(start_date,end_date)
+    st.plotly_chart(fig2,use_container_width=True)
+    fig3 = sf.chanel_spends(start_date,end_date)
+    st.plotly_chart(fig3,use_container_width=True)
     # Format first three rows in percentage format
     # styled_df = sf.shares_table_func(shares_df)
     # # styled_df = styled_df.round(0).astype(int)
@@ -146,15 +253,18 @@ if auth_status:
 
     # st.table(styled_df)
     shares_df = sf.shares_df_func(start_date,end_date)
+    shares_df1 = sf.shares_table_func(shares_df)
     st.dataframe(sf.shares_table_func(shares_df),use_container_width=True)
-
+    shares_df2 = sf.eff_table_func(shares_df)
     st.dataframe(sf.eff_table_func(shares_df).style.format({"TOTAL SPEND": "{:,.0f}", "TOTAL SUPPORT": "{:,.0f}", "TOTAL CONTRIBUTION": "{:,.0f}"}),use_container_width=True)
 
     ### CPP CHART
-
-
-
+    fig4 = sf.cpp(start_date,end_date)
+    st.plotly_chart(fig4,use_container_width=True)
+
     with st.expander("View Change in MMM Estimated Prospect Contributions Analysis"):
+        data_selection_type = st.radio("Select Input Type",["Compare Monthly Change", "Compare Custom Range"])
+        waterfall_start_date,waterfall_end_date = start_date,end_date
         # Dropdown menu options
         st.markdown("<h1 style='font-size:28px;'>Change in MMM Estimated Prospect Contributions</h1>", unsafe_allow_html=True)
         if data_selection_type == "Compare Monthly Change":
@@ -255,8 +365,20 @@ if auth_status:
 
     with st.expander("View Decomposition Analysis"):
         ### Base decomp CHART
-
+        fig6 = sf.base_decomp()
+        st.plotly_chart(fig6,use_container_width=True)
 
         ### Media decomp CHART
-
+        fig7 = sf.media_decomp()
+        st.plotly_chart(fig7,use_container_width=True)
+
+    if st.button("Prepare Download Of Analysis"):
+        ppt_file = save_ppt_file(fig1,fig2,fig3,fig4,fig6,fig7,figw,start_date,end_date,shares_df1,shares_df2)
+        # Add a download button
+        st.download_button(
+            label="Download Analysis",
+            data=ppt_file,
+            file_name="MMM_Model_Result Overview.pptx",
+            mime="application/vnd.openxmlformats-officedocument.presentationml.presentation"
+        )
 
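Note on the export pattern added above: save_ppt_file renders each Plotly figure to a PNG in memory and drops it onto a blank python-pptx slide, then serializes the whole deck to bytes for Streamlit. The snippet below is only an illustrative sketch of that pattern, not the app's code; it assumes the kaleido package is installed for static image export (the commit itself passes engine="orca", which requires the separate Orca binary) and uses a made-up bar chart.

    # Sketch: render a Plotly figure to PNG in memory and place it on a blank slide.
    from io import BytesIO

    import plotly.graph_objects as go
    import plotly.io as pio
    from pptx import Presentation
    from pptx.util import Inches


    def figure_to_slide(prs, fig, left=Inches(0.25), top=Inches(0.25),
                        width=Inches(9.25), height=Inches(6.75)):
        slide = prs.slides.add_slide(prs.slide_layouts[6])  # layout 6 = blank
        stream = BytesIO()
        pio.write_image(fig, stream, format="png")  # kaleido backend assumed
        stream.seek(0)
        slide.shapes.add_picture(stream, left, top, width, height)
        return slide


    if __name__ == "__main__":
        demo_fig = go.Figure(go.Bar(x=["TV", "Search", "Social"], y=[3, 2, 1]))
        prs = Presentation()
        figure_to_slide(prs, demo_fig)
        out = BytesIO()
        prs.save(out)  # same BytesIO round-trip the page uses for st.download_button
        print(len(out.getvalue()), "bytes of PPTX")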
Streamlit_functions.py
CHANGED
@@ -128,6 +128,24 @@ def pie_charts(start_date,end_date):
     data2.index = channels
     data2.columns = ["p"]
 
+    colors = ['#ff2b2b', # Pastel Peach
+              '#0068c9', # Pastel Blue
+              '#83c9ff', # Pastel Pink
+
+              '#ffabab', # Pastel Purple
+              '#29b09d', # Pastel Green
+              '#7defa1', # Pastel Yellow
+              '#ff8700', # Pastel Gray
+              '#ffd16a', # Pastel Red
+              '#6d3fc0', # Pastel Rose
+              '#d5dae5', # Pastel Lavender
+              '#309bff', # Pastel Mauve
+              '#e9f5ff', # Pastel Beige
+              '#BEBADA'  # Pastel Lilac
+              ]
+
+
+
     fig = make_subplots(rows=1, cols=2, specs=[[{'type':'domain'}, {'type':'domain'}]])
 
     fig.add_trace(go.Pie(labels=channels,
@@ -137,6 +155,7 @@ def pie_charts(start_date,end_date):
                          textinfo= 'label+percent',
                          showlegend= False,textfont=dict(size =10),
                          title="Distribution of Spends"
+                         , marker=dict(colors=colors)
                          ), 1, 1)
 
     fig.add_trace(go.Pie(labels=channels,
@@ -146,7 +165,7 @@ def pie_charts(start_date,end_date):
                          textinfo= 'label+percent',
                          showlegend= False,
                          textfont=dict(size = 10),
-                         title = "Distribution of Prospect Contributions"
+                         title = "Distribution of Prospect Contributions", marker=dict(colors=colors)
                          ), 1, 2)
     # fig.update_layout(
     #     title="Distribution Of Spends And Prospect Contributions"
@@ -180,6 +199,22 @@ def pie_charts(start_date,end_date):
     return fig
 
 def pie_spend(start_date,end_date):
+    colors = ['#ff2b2b', # Pastel Peach
+              '#0068c9', # Pastel Blue
+              '#83c9ff', # Pastel Pink
+
+              '#ffabab', # Pastel Purple
+              '#29b09d', # Pastel Green
+              '#7defa1', # Pastel Yellow
+              '#ff8700', # Pastel Gray
+              '#ffd16a', # Pastel Red
+              '#6d3fc0', # Pastel Rose
+              '#d5dae5', # Pastel Lavender
+              '#309bff', # Pastel Mauve
+              '#e9f5ff', # Pastel Beige
+              '#BEBADA'  # Pastel Lilac
+              ]
+
     start_date = pd.to_datetime(start_date)
     end_date = pd.to_datetime(end_date)
     cur_data = df[(df['Date'] >= start_date) & (df['Date'] <= end_date)]
@@ -194,26 +229,26 @@ def pie_spend(start_date,end_date):
                          textinfo= 'label+percent',
                          showlegend= False,
                          textfont=dict(size = 10)
-
+                         , marker=dict(colors=colors)
                          )])
 
     # Customize the layout
-
-
-
-
-
-
-
-
-
-
-
+    fig.update_layout(
+        # title="Distribution Of Spends"
+        title={
+            'text': "Distribution Of Spends",
+            'font': {
+                'size': 24,
+                'family': 'Arial',
+                'color': 'black',
+                # 'bold': True
+            }
+        }
 
-
+    )
 
     fig.add_annotation(
-        text=f"
+        text=f"{start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}",
         x=0,
         y=1.15,
         xref="x domain",
@@ -226,6 +261,21 @@ def pie_spend(start_date,end_date):
     # Show the figure
     return fig
 def pie_contributions(start_date,end_date):
+    colors = ['#ff2b2b', # Pastel Peach
+              '#0068c9', # Pastel Blue
+              '#83c9ff', # Pastel Pink
+
+              '#ffabab', # Pastel Purple
+              '#29b09d', # Pastel Green
+              '#7defa1', # Pastel Yellow
+              '#ff8700', # Pastel Gray
+              '#ffd16a', # Pastel Red
+              '#6d3fc0', # Pastel Rose
+              '#d5dae5', # Pastel Lavender
+              '#309bff', # Pastel Mauve
+              '#e9f5ff', # Pastel Beige
+              '#BEBADA'  # Pastel Lilac
+              ]
     start_date = pd.to_datetime(start_date)
     end_date = pd.to_datetime(end_date)
     cur_data = df[(df['Date'] >= start_date) & (df['Date'] <= end_date)]
@@ -241,16 +291,35 @@ def pie_contributions(start_date,end_date):
                          textposition='auto',
                          showlegend= False,
                          textfont=dict(size = 10)
-
+                         , marker=dict(colors=colors)
                          )])
 
     # fig.add_annotation(showarrow=False)
     # Customize the layout
     fig.update_layout(
-        title="Distribution Of Contributions",
+        # title="Distribution Of Contributions",
+        title={
+            'text': "Distribution of Prospects",
+            'font': {
+                'size': 24,
+                'family': 'Arial',
+                'color': 'black',
+                # 'bold': True
+            }
+        }
         # margin=dict(t=0, b=0, l=0, r=0)
     )
 
+    fig.add_annotation(
+        text=f"{start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}",
+        x=0,
+        y=1.15,
+        xref="x domain",
+        yref="y domain",
+        showarrow=False,
+        font=dict(size=18),
+        # align='left'
+    )
 
     # Show the figure
     return fig
@@ -404,8 +473,8 @@ def waterfall2(start_date1,end_date1,start_date2,end_date2):
         font=dict(size=16),
         # align='left'
     )
-    # # print(cur_data)
-    # # print(prev_data)
+    # # # print(cur_data)
+    # # # print(prev_data)
     # fig.show()
     return fig
 def waterfall(start_date,end_date,btn_chart):
@@ -555,8 +624,8 @@ def waterfall(start_date,end_date,btn_chart):
         font=dict(size=16),
         # align='left'
     )
-    # # print(cur_data)
-    # # print(prev_data)
+    # # # print(cur_data)
+    # # # print(prev_data)
     # fig.show()
     return fig
 
@@ -855,12 +924,27 @@ def cpp(start_date,end_date):
             'rgba(240, 230, 140, 0.8)', # Khaki
             'rgba(218, 112, 214, 0.8)'
     ]
+    colors = ['#ff2b2b', # Pastel Peach
+              '#0068c9', # Pastel Blue
+              '#83c9ff', # Pastel Pink
+
+              '#ffabab', # Pastel Purple
+              '#29b09d', # Pastel Green
+              '#7defa1', # Pastel Yellow
+              '#ff8700', # Pastel Gray
+              '#ffd16a', # Pastel Red
+              '#6d3fc0', # Pastel Rose
+              '#d5dae5', # Pastel Lavender
+              '#309bff', # Pastel Mauve
+              '#e9f5ff', # Pastel Beige
+              '#BEBADA'  # Pastel Lilac
+              ]
 
     for i in range(0,13):
         cpp_df = cur_data[['Date',spend_cols[i],contribution_cols[i]]]
         cpp_df[channels[i]+"_cpp"] = cpp_df[spend_cols[i]]/cpp_df[contribution_cols[i]]
         # Add each line trace
-        fig.add_trace(go.Scatter(x=cpp_df['Date'], y=cpp_df[channels[i]+"_cpp"], mode='lines', name=channels[i]))
+        fig.add_trace(go.Scatter(x=cpp_df['Date'], y=cpp_df[channels[i]+"_cpp"], mode='lines', name=channels[i], line=dict(color=colors[i])))
 
     # Update layout for better visualization
     fig.update_layout(
@@ -915,11 +999,14 @@ def base_decomp():
 
     base_decomp_df = df[['Date','Unemployment', 'Competition','Trend','Seasonality','Base_0']]
     fig = go.Figure()
-
+    colors = ['#ff2b2b', # Pastel Peach
+              '#0068c9', # Pastel Blue
+              '#83c9ff', # Pastel Pink
+              ]
     # Add each line trace
-    fig.add_trace(go.Scatter(x=base_decomp_df['Date'], y=base_decomp_df['Base_0'], mode='lines', name='Trend and Seasonality'))
-    fig.add_trace(go.Scatter(x=base_decomp_df['Date'], y=base_decomp_df['Unemployment'], mode='lines', name='Unemployment'))
-    fig.add_trace(go.Scatter(x=base_decomp_df['Date'], y=base_decomp_df['Competition'], mode='lines', name='Competition'))
+    fig.add_trace(go.Scatter(x=base_decomp_df['Date'], y=base_decomp_df['Base_0'], mode='lines', name='Trend and Seasonality',line=dict(color=colors[0])))
+    fig.add_trace(go.Scatter(x=base_decomp_df['Date'], y=base_decomp_df['Unemployment'], mode='lines', name='Unemployment',line=dict(color=colors[1])))
+    fig.add_trace(go.Scatter(x=base_decomp_df['Date'], y=base_decomp_df['Competition'], mode='lines', name='Competition',line=dict(color=colors[2])))
 
     # Update layout for better visualization
     fig.update_layout(
@@ -997,7 +1084,7 @@ def media_decomp():
 
     media_cols = media_decomp_df.columns
     for i in range(2,len(media_cols)):
-        # # print(media_cols[i])
+        # # # print(media_cols[i])
         cumulative_df[media_cols[i]] = cumulative_df[media_cols[i]] + cumulative_df[media_cols[i-1]]
         # cumulative_df
 
@@ -1079,8 +1166,8 @@ def mmm_model_quality():
     fig = go.Figure()
 
     # Add each line trace
-    fig.add_trace(go.Scatter(x=base_df['Date'], y=base_df['Y_hat'], mode='lines', name='Predicted'))
-    fig.add_trace(go.Scatter(x=base_df['Date'], y=base_df['Y'], mode='lines', name='Actual (Prospect)'))
+    fig.add_trace(go.Scatter(x=base_df['Date'], y=base_df['Y_hat'], mode='lines', name='Predicted',line=dict(color='#CC5500') ))
+    fig.add_trace(go.Scatter(x=base_df['Date'], y=base_df['Y'], mode='lines', name='Actual (Prospect)',line=dict(color='#4B88FF')))
 
 
     # Update layout for better visualization
@@ -1467,7 +1554,7 @@ def scenario_spend_forecasting2(delta_df,start_date,end_date):
         return "Invalid month number"
 
     data2["Month year"] = data2["Month"].apply(get_month_name) + ' ' +(data2["Date"].dt.year+1).astype(str)
-    # print(data2.columns)
+    # # print(data2.columns)
     data2 = data2[['Month year' ,'BROADCAST TV', 'CABLE TV',
        'CONNECTED & OTT TV', 'VIDEO', 'DISPLAY PROSPECTING',
        'DISPLAY RETARGETING', 'SOCIAL PROSPECTING', 'SOCIAL RETARGETING',
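The recurring change in this file is a hard-coded colors list threaded into each chart so every channel keeps the same color across the spend, contribution, and CPP views. Below is a small sketch of the same idea using a single shared lookup table instead of repeating the literal list; the channel names and hex values are illustrative, not the app's full thirteen-channel palette.

    # Sketch: one shared channel-to-color map applied to two domain-type pie subplots.
    import plotly.graph_objects as go
    from plotly.subplots import make_subplots

    CHANNEL_COLORS = {
        "BROADCAST TV": "#ff2b2b",
        "CABLE TV": "#0068c9",
        "VIDEO": "#83c9ff",
    }

    def spend_vs_contribution_pies(spends, contributions):
        labels = list(CHANNEL_COLORS)
        colors = [CHANNEL_COLORS[c] for c in labels]
        fig = make_subplots(rows=1, cols=2,
                            specs=[[{"type": "domain"}, {"type": "domain"}]])
        fig.add_trace(go.Pie(labels=labels, values=spends,
                             marker=dict(colors=colors), title="Spends"), 1, 1)
        fig.add_trace(go.Pie(labels=labels, values=contributions,
                             marker=dict(colors=colors), title="Contributions"), 1, 2)
        return fig

    fig = spend_vs_contribution_pies([40, 35, 25], [30, 45, 25])
    # fig.show()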
__pycache__/Streamlit_functions.cpython-310.pyc
CHANGED
Binary files a/__pycache__/Streamlit_functions.cpython-310.pyc and b/__pycache__/Streamlit_functions.cpython-310.pyc differ

__pycache__/classes.cpython-310.pyc
CHANGED
Binary files a/__pycache__/classes.cpython-310.pyc and b/__pycache__/classes.cpython-310.pyc differ

__pycache__/response_curves_model_quality.cpython-310.pyc
CHANGED
Binary files a/__pycache__/response_curves_model_quality.cpython-310.pyc and b/__pycache__/response_curves_model_quality.cpython-310.pyc differ

__pycache__/response_curves_model_quality_base.cpython-310.pyc
CHANGED
Binary files a/__pycache__/response_curves_model_quality_base.cpython-310.pyc and b/__pycache__/response_curves_model_quality_base.cpython-310.pyc differ

__pycache__/utilities.cpython-310.pyc
CHANGED
Binary files a/__pycache__/utilities.cpython-310.pyc and b/__pycache__/utilities.cpython-310.pyc differ

__pycache__/utilities_with_panel.cpython-310.pyc
CHANGED
Binary files a/__pycache__/utilities_with_panel.cpython-310.pyc and b/__pycache__/utilities_with_panel.cpython-310.pyc differ
classes.py
CHANGED
@@ -100,7 +100,7 @@ class Channel:
         self.modified_total_sales = self.modified_sales.sum()
         self.delta_spends = self.modified_total_spends - self.actual_total_spends
         self.delta_sales = self.modified_total_sales - self.actual_total_sales
-        # print(self.actual_total_spends)
+        # # # print(self.actual_total_spends)
     def update_penalty(self, penalty):
         self.penalty = penalty
 
@@ -108,23 +108,25 @@ class Channel:
         return spends_array * total_spends / spends_array.sum()
 
     def modify_spends(self, total_spends):
-
-
-
-
-
+        # # # print(total_spends)
+        self.modified_spends[0] = total_spends
+        # (
+        #     self.modified_spends * total_spends / self.modified_spends.sum()
+        # )
+        # # # print("in spends")
+        # # # print(self.modified_spends,self.modified_spends.sum())
 
     def calculate_sales(self):
-        print("in calc_sales")
-        print(self.modified_spends)
+        # # # print("in calc_sales")
+        # # # print(self.modified_spends)
         return self.response_curve(self.modified_spends)
 
     def hill_equation(x, Kd, n):
         return x**n / (Kd**n + x**n)
     def response_curve(self, x):
-        print(x)
+        # # # print(x)
         # if self.penalty:
-        #     print("in penalty")
+        #     # # # print("in penalty")
         #     x = np.where(
         #         x < self.upper_limit,
         #         x,
@@ -132,14 +134,14 @@ class Channel:
         #     )
         if self.response_curve_type == "hill-eq":
             # dividing_parameter = check_dividing_parameter()
-            # print("lalala")
-            # # print(self.name)\
-            # print(len(x))
-            print("in response curve function")
-            print(x)
+            # # # print("lalala")
+            # # # # print(self.name)\
+            # # # print(len(x))
+            # # # print("in response curve function")
+            # # # print(x)
             if len(x) == 1:
                 dividing_rate = self.response_curve_params["num_pos_obsv"]
-                # print(dividing_rate)
+                # # # print(dividing_rate)
                 # x = np.sum(x)
             else:
                 dividing_rate = 1
@@ -152,14 +154,14 @@ class Channel:
             x_max= self.response_curve_params["x_max"]
             y_min= self.response_curve_params["y_min"]
             y_max= self.response_curve_params['y_max']
-            # # print(x_min)
-            # # print(Kd,n,x_min,x_max,y_min,y_max)
-            # # print(np.sum(x)/104)
+            # # # # print(x_min)
+            # # # # print(Kd,n,x_min,x_max,y_min,y_max)
+            # # # # print(np.sum(x)/104)
             x_inp = ( x/dividing_rate- x_min) / (x_max - x_min)
-            # # print("x",x)
-            # # print("x_inp",x_inp)
+            # # # # print("x",x)
+            # # # # print("x_inp",x_inp)
             x_out = x_inp**n / (Kd**n + x_inp**n) #self.hill_equation(x_inp,Kd, n)
-            # # print("x_out",x_out)
+            # # # # print("x_out",x_out)
 
 
             x_val_inv = (x_out*x_max + (1 - x_out) * x_min)
@@ -167,12 +169,12 @@ class Channel:
             # sales = ((x_max - x_min)*x_out + x_min)*dividing_rate
 
             sales[np.isnan(sales)] = 0
-            # # print(sales)
-            # # print(np.sum(sales))
-            # # print("sales",sales)
-            print("aa")
-            print(sales)
-            print("aa1")
+            # # # # print(sales)
+            # # # # print(np.sum(sales))
+            # # # # print("sales",sales)
+            # # # print("aa")
+            # # # print(sales)
+            # # # print("aa1")
         if self.response_curve_type == "s-curve":
             if self.power >= 0:
                 x = x / 10**self.power
@@ -289,23 +291,24 @@ class Scenario:
 
     def calculate_actual_total_sales(self):
         total_actual_sales = 0#self.constant.sum() + self.correction.sum()
-        # print("a")
+        # # # print("a")
         for channel in self.channels.values():
             total_actual_sales += channel.actual_total_sales
-            # # print(channel.actual_total_sales)
-            # # print(total_actual_sales)
+            # # # # print(channel.actual_total_sales)
+            # # # # print(total_actual_sales)
         return total_actual_sales
 
     def calculate_modified_total_sales(self):
 
         total_modified_sales = 0 #self.constant.sum() + self.correction.sum()
-        # print(total_modified_sales)
+        # # # print(total_modified_sales)
         for channel in self.channels.values():
-            # print(channel,channel.modified_total_sales)
+            # # # print(channel,channel.modified_total_sales)
             total_modified_sales += channel.modified_total_sales
         return total_modified_sales
 
     def update(self, channel_name, modified_spends):
+        # # # print("in updtw")
         self.channels[channel_name].update(modified_spends)
         self.modified_total_sales = self.calculate_modified_total_sales()
         self.modified_total_spends = self.calculate_modified_total_spends()
@@ -363,21 +366,33 @@ class Scenario:
 
 
     def optimize_spends(self, sales_percent, channels_list, algo="trust-constr"):
-
+        num_channels = len(channels_list)
+        # # # # print("%"*100)
         desired_sales = self.actual_total_sales * (1 + sales_percent / 100.0)
 
         def constraint(x):
             for ch, spends in zip(channels_list, x):
                 self.update(ch, spends)
             return self.modified_total_sales - desired_sales
+
+        # def calc_overall_bounds(channels_list):
+        #     total_spends=0
+        #     for ch in zip(channels_list):
+        #         print(ch)
+        #         total_spends= total_spends+self.channels[ch].actual_total_spends
+        #     return total_spends
+
 
         bounds = []
         for ch in channels_list:
-            bounds.append(
-
-
-            )
-
+            # bounds.append(
+            #     (1+np.array([-50.0, 100.0]) / 100.0)
+            #     * self.channels[ch].actual_total_spends
+            # )
+            lb = (1- int(self.channels[ch].channel_bounds_min) / 100) * self.channels[ch].actual_total_spends
+            ub = (1+ int(self.channels[ch].channel_bounds_max) / 100) * self.channels[ch].actual_total_spends
+            bounds.append((lb,ub))
+            # # # # print(self.channels[ch].actual_total_spends)
         initial_point = []
         for bound in bounds:
             initial_point.append(bound[0])
@@ -385,7 +400,9 @@ class Scenario:
 
         power = np.ceil(np.log(sum(initial_point)) / np.log(10))
 
-        constraints = [NonlinearConstraint(constraint, -1.0, 1.0)
+        constraints = [NonlinearConstraint(constraint, -1.0, 1.0),
+                       # LinearConstraint(np.ones((num_channels,)), lb = -50*calc_overall_bounds(channels_list), ub = 50*calc_overall_bounds(channels_list))
+                       ]
 
         res = minimize(
             lambda x: sum(x) / 10 ** (power),
@@ -411,12 +428,13 @@ class Scenario:
         for channel_name in channels_list:
             # spends_constraint += self.channels[channel_name].modified_total_spends
             spends_constant.append(self.channels[channel_name].conversion_rate)
-            # print(spends_constant)
+            # # # print(spends_constant)
             spends_constraint += (
-                self.channels[channel_name].actual_total_spends
-
+                self.channels[channel_name].actual_total_spends+ self.channels[channel_name].delta_spends
+                #* (1 + self.channels[channel_name].delta_spends / 100)
             )
-
+            # # # print("delta spends",self.channels[channel_name].delta_spends)
+        # spends_constraint = spends_constraint * (1 + spends_percent / 100)
         constraint= LinearConstraint(np.ones((num_channels,)), lb = spends_constraint, ub = spends_constraint)
         # constraint = LinearConstraint(
         #     np.array(spends_constant),
@@ -428,14 +446,17 @@ class Scenario:
         for channel_name in channels_list:
             _channel_class = self.channels[channel_name]
             channel_bounds = _channel_class.bounds
-            channel_actual_total_spends = _channel_class.actual_total_spends
-
-            )
+            channel_actual_total_spends = _channel_class.actual_total_spends + _channel_class.delta_spends
+            # * (
+            #     (1 + _channel_class.delta_spends / 100)
+            # )
             old_spends.append(channel_actual_total_spends)
             # bounds.append((1+ channel_bounds / 100) * channel_actual_total_spends)
-            lb = (1- _channel_class.channel_bounds_min / 100) *
-            ub = (1+
+            lb = (1- int(_channel_class.channel_bounds_min) / 100) * _channel_class.actual_total_spends
+            ub = (1+ int(_channel_class.channel_bounds_max) / 100) * _channel_class.actual_total_spends
             bounds.append((lb,ub))
+            # # # print("aaaaaa")
+            # # print((_channel_class.channel_bounds_max,_channel_class.channel_bounds_min))
             # _channel_class.channel_bounds_min
             # _channel_class.channel_bounds_max
         def cost_func1(channel,x):
@@ -450,44 +471,44 @@ class Scenario:
             y_max= param_dicts['y_max'][channel]
             division_parameter = param_dicts['num_pos_obsv'][channel]
             x_inp = ( x/division_parameter- x_min) / (x_max - x_min)
-            # print(x_inp)
+            # # # print(x_inp)
             x_out = x_inp**n / (Kd**n + x_inp**n)
             x_val_inv = (x_out*x_max + (1 - x_out) * x_min)
             sales = (x_val_inv*y_min/y_max)*division_parameter
             if np.isnan(sales):
-                # print(sales,channel)
+                # # # print(sales,channel)
                 sales = 0
-                # print(sales,channel)
+                # # # print(sales,channel)
             return sales
         def objective_function(x):
-
+            sales = 0
+            it = 0
             for channel_name, modified_spends in zip(channels_list, x):
-
-                #
+                # sales = sales + cost_func1(channel_name,modified_spends)
+                # print(channel_name, modified_spends,cost_func1(channel_name, modified_spends))
+                it+=1
+                self.update(channel_name, modified_spends)
+                # # # print(self.modified_total_sales)
+                # # # print(channel_name, modified_spends)
            return -1 * self.modified_total_sales
 
-        print(bounds)
-        # # print("$"*100)
+        # # # print(bounds)
+        # # # # print("$"*100)
         res = minimize(
-            lambda x: objective_function(x)
+            lambda x: objective_function(x)/1e3,
            method="trust-constr",
            x0=old_spends,
            constraints=constraint,
            bounds=bounds,
            options={"maxiter": int(1e7), "xtol": 0.1},
        )
-
-        #     objective_function,
-        #     x0=old_spends,
-        #     mi
-        #     constraints=constraint,
-        #     bounds=bounds,
-        #     tol=1e-16
-        # )
-        # # print(res)
+
        for channel_name, modified_spends in zip(channels_list, res.x):
+            # # # print("aaaaaaaaaaaaaa")
            self.update(channel_name, modified_spends)
-            print(channel_name, modified_spends,cost_func1(channel_name, modified_spends))
+            # # # print(channel_name, modified_spends,cost_func1(channel_name, modified_spends))
+
+        # print(it)
 
        return zip(channels_list, res.x)
 
@@ -512,7 +533,7 @@ class Scenario:
 
    #     # x_vars=[]
    #     x_vars = [m.Var(value=param_dicts["current_spends"][_], lb=param_dicts["x_min"][_]*104, ub=5*param_dicts["current_spends"][_]) for _ in channels_list]
-    #     # # print(x_vars)
+    #     # # # # print(x_vars)
    #     # x_vars,lower_bounds
 
    #     # Define the objective function to minimize
@@ -520,8 +541,8 @@ class Scenario:
    #         spends = 0
    #         i = 0
    #         for i,c in enumerate(channels_list):
-    #             # # # print(c)
-    #             # # # print(x_vars[i])
+    #             # # # # # print(c)
+    #             # # # # # print(x_vars[i])
    #             cost = cost + (self.cost_func(c, x_vars[i]))
    #             spends = spends +x_vars[i]
 
@@ -536,7 +557,7 @@ class Scenario:
    #     m.solve(disp=True)
 
    #     for i, var in enumerate(x_vars):
-    #         # print(f"x{i+1} = {var.value[0]}")
+    #         # # # # print(f"x{i+1} = {var.value[0]}")
 
    #     for channel_name, modified_spends in zip(channels_list, x_vars):
    #         self.update(channel_name, modified_spends.value[0])
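For context on the optimization changes above: the commit pins total spend with a LinearConstraint over the spend vector, bounds each channel to plus or minus channel_bounds_min/max percent of its current spend, and maximizes modeled sales by minimizing its negative with the trust-constr method. Below is a self-contained toy version of that pattern; the square-root response stands in for the app's fitted Hill curves and every number is illustrative.

    # Sketch: budget-constrained spend reallocation with scipy's trust-constr.
    import numpy as np
    from scipy.optimize import LinearConstraint, minimize

    current = np.array([100.0, 50.0, 25.0])   # current spend per channel
    lift = np.array([1.0, 1.4, 2.0])          # toy effectiveness multipliers

    def total_response(spends):
        # diminishing-returns toy curve (the app uses fitted Hill curves instead)
        return float(np.sum(lift * np.sqrt(spends)))

    budget = current.sum()                    # total spend must stay the same
    budget_constraint = LinearConstraint(np.ones(len(current)), lb=budget, ub=budget)
    bounds = [((1 - 0.30) * s, (1 + 0.30) * s) for s in current]   # +/-30% per channel

    res = minimize(lambda x: -total_response(x),   # minimize the negative => maximize
                   x0=current,
                   method="trust-constr",
                   constraints=[budget_constraint],
                   bounds=bounds,
                   options={"maxiter": 10_000, "xtol": 1e-6})

    print(dict(zip(["TV", "Search", "Social"], np.round(res.x, 1))))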
pages/1_Model_Quality.py
CHANGED
@@ -7,13 +7,126 @@ import response_curves_model_quality_base as rc1
 st.set_page_config(
     layout="wide"
 )
+from pptx import Presentation
+from pptx.util import Inches
+from io import BytesIO
+import plotly.io as pio
+import Streamlit_functions as sf
+import response_curves_model_quality_base as rc1
+
+
+def save_ppt_file():
+
+    # Initialize PowerPoint presentation
+    prs = Presentation()
+    # Helper function to add Plotly figure to slide
+    def add_plotly_chart_to_slide(slide, fig, left, top, width, height):
+        img_stream = BytesIO()
+        pio.write_image(fig, img_stream, format='png',engine="orca")
+        slide.shapes.add_picture(img_stream, left, top, width, height)
+
+    # Slide 1: Model Quality with Chart
+    slide_1 = prs.slides.add_slide(prs.slide_layouts[5])
+    title_1 = slide_1.shapes.title
+    title_1.text = "Model Quality"
+    i = 0
+    # print (i)
+    # Generate Plotly chart
+    fig = sf.mmm_model_quality()
+
+    # Add the Plotly chart to the slide
+    add_plotly_chart_to_slide(slide_1, fig, Inches(1), Inches(2), width=Inches(9), height=Inches(4.5))
+    i = i+1
+    # print (i)
+    # Slide 2: Media Data Elasticity
+    slide_2 = prs.slides.add_slide(prs.slide_layouts[5])
+    title_2 = slide_2.shapes.title
+    title_2.text = "Media Data Elasticity"
+    i = i+1
+    # print (i)
+    # Generate Elasticity chart
+    media_df = sf.media_data()
+    fig = sf.elasticity(media_df)
+    fig.update_layout(
+        margin=dict(l=150, r=50, t=50, b=50),  # Adjust margins
+        # xaxis=dict(tickangle=-45) # Rotate x-axis labels if needed
+    )
+    i = i+1
+    # print (i)
+    # Add the Plotly chart to the slide
+    add_plotly_chart_to_slide(slide_2, fig, Inches(1), Inches(2), width=Inches(8), height=Inches(4.5))
+    i = i+1
+    # print (i)
+    # Slide 3: Half-Life Analysis
+    slide_3 = prs.slides.add_slide(prs.slide_layouts[5])
+    title_3 = slide_3.shapes.title
+    title_3.text = "Half-Life Analysis"
+    i = i+1
+    # print (i)
+    # Generate Half-Life chart
+    fig = sf.half_life(media_df)
+    fig.update_layout(
+        margin=dict(l=150, r=100, t=50, b=50),  # Adjust margins
+        # xaxis=dict(tickangle=-45) # Rotate x-axis labels if needed
+    )
+    i = i+1
+    # print (i)
+    # Add the Plotly chart to the slide
+    add_plotly_chart_to_slide(slide_3, fig, Inches(1), Inches(2), width=Inches(8), height=Inches(4.5))
+    i = i+1
+    # print (i)
+    # Slide 4: Response Curves
+
+    # Generate Response Curves chart
+    channels = [
+        'Broadcast TV',
+        'Cable TV',
+        'Connected & OTT TV',
+        'Display Prospecting',
+        'Display Retargeting',
+        'Video',
+        'Social Prospecting',
+        'Social Retargeting',
+        'Search Brand',
+        'Search Non-brand',
+        'Digital Partners',
+        'Audio',
+        'Email']
+    i = 4
+    for channel_name in channels:
+        slide_4 = prs.slides.add_slide(prs.slide_layouts[5])
+        title_4 = slide_4.shapes.title
+        title_4.text = "Response Curves"
+        i = i+1
+        # print (i)
+        selected_option = channel_name
+        selected_option2 = 'View Line Plot'
+        fig = rc1.response_curves(selected_option, selected_option2)
+        # Add the Plotly chart to the slide
+        add_plotly_chart_to_slide(slide_4, fig, Inches(1), Inches(2), width=Inches(6), height=Inches(4.5))
+    # Save the PowerPoint presentation
+    # prs.save('MMM_Model_Quality_Presentation.pptx')
+    # # print("PowerPoint slides created successfully.")
+
+    # Save to a BytesIO object
+    ppt_stream = BytesIO()
+    prs.save(ppt_stream)
+    ppt_stream.seek(0)
+
+    return ppt_stream.getvalue()
+
+
 
 
 st.header("Model Quality")
 # st.write("MMM Model Quality")
 
 st.plotly_chart(sf.mmm_model_quality(),use_container_width=True)
-
+fig = sf.mmm_model_quality()
+# print("aaa")
+fig.write_image("chart.png",engine="orca")
+# print("bbb")
+
 media_df = sf.media_data()
 # Create two columns for start date and end date input
 col1, col2 , col3 = st.columns([1,0.2,1])
@@ -23,10 +136,14 @@ st.dataframe(df1,hide_index = True,use_container_width=True)
 # st.plotly_chart(sf.elasticity_and_media(media_df))
 with col1:
     st.plotly_chart(sf.elasticity(media_df))
+    fig = sf.elasticity(media_df)
+    fig.write_image("chart.png",engine="orca")
 with col2:
     st.write("")
 with col3:
     st.plotly_chart(sf.half_life(media_df))
+    fig = sf.elasticity(media_df)
+    fig.write_image("chart.png",engine="orca")
 
 
 # Dropdown menu options
@@ -57,4 +174,16 @@ with col1:
 
 with col2:
     st.write("")
-    st.plotly_chart(rc1.response_curves(selected_option,selected_option2))
+    st.plotly_chart(rc1.response_curves(selected_option,selected_option2))
+
+if st.button("Prepare Analysis Download"):
+    ppt_file = save_ppt_file()
+    # Add a download button
+    st.download_button(
+        label="Download Analysis",
+        data=ppt_file,
+        file_name="MMM_Model_Quality_Presentation.pptx",
+        mime="application/vnd.openxmlformats-officedocument.presentationml.presentation"
+    )
+
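The download flow added to this page builds the deck only after the user clicks, then hands the raw bytes to st.download_button. A minimal sketch of the same two-step flow follows; generate_report() is a hypothetical stand-in for save_ppt_file(), not part of the app.

    # Sketch: build a PPTX in memory on demand and expose it via st.download_button.
    from io import BytesIO

    import streamlit as st
    from pptx import Presentation

    def generate_report() -> bytes:
        # Placeholder deck; the page's save_ppt_file() adds chart slides here.
        prs = Presentation()
        slide = prs.slides.add_slide(prs.slide_layouts[5])  # "Title Only" layout
        slide.shapes.title.text = "Model Quality"
        buf = BytesIO()
        prs.save(buf)
        return buf.getvalue()

    if st.button("Prepare Analysis Download"):
        st.download_button(
            label="Download Analysis",
            data=generate_report(),
            file_name="MMM_Model_Quality_Presentation.pptx",
            mime="application/vnd.openxmlformats-officedocument.presentationml.presentation",
        )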
pages/2_Scenario_Planner.py
CHANGED
@@ -40,6 +40,11 @@ st.set_page_config(layout="wide")
|
|
40 |
load_local_css("styles.css")
|
41 |
set_header()
|
42 |
|
|
|
|
|
|
|
|
|
|
|
43 |
|
44 |
for k, v in st.session_state.items():
|
45 |
if k not in ["logout", "login", "config"] and not k.startswith("FormSubmitter"):
|
@@ -48,6 +53,73 @@ for k, v in st.session_state.items():
|
|
48 |
# ======================= Functions ====================== #
|
49 |
# ======================================================== #
|
50 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
def first_day_of_next_year(date):
|
52 |
next_year = date.year + 1
|
53 |
first_day = datetime(next_year, 1, 1).date()
|
@@ -91,15 +163,15 @@ def optimize(key, status_placeholder):
|
|
91 |
if key.lower() == "media spends":
|
92 |
with status_placeholder:
|
93 |
with st.spinner("Optimizing"):
|
94 |
-
# # print(channel_list)
|
95 |
-
# # print(st.session_state["total_spends_change"])
|
96 |
result = st.session_state["scenario"].optimize(
|
97 |
st.session_state["total_spends_change"], channel_list
|
98 |
# result = st.session_state["scenario"].spends_optimisation(
|
99 |
# st.session_state["total_spends_change"], channel_list
|
100 |
)
|
101 |
-
print("")
|
102 |
-
print(list(zip(*result)))
|
103 |
|
104 |
|
105 |
|
@@ -142,7 +214,7 @@ def save_scenario(scenario_name):
|
|
142 |
st.session_state["scenario"]
|
143 |
)
|
144 |
st.session_state["scenario_input"] = ""
|
145 |
-
# # print(type(st.session_state['saved_scenarios']))
|
146 |
with open("../saved_scenarios.pkl", "wb") as f:
|
147 |
pickle.dump(st.session_state["saved_scenarios"], f)
|
148 |
|
@@ -219,9 +291,9 @@ def update_sales():
|
|
219 |
# def update_all_spends_abs_slider():
|
220 |
# actual_spends = _scenario.actual_total_spends
|
221 |
# if validate_input(st.session_state["total_spends_change_abs_slider"]):
|
222 |
-
# # print("#" * 100)
|
223 |
-
# # print(st.session_state["total_spends_change_abs_slider"])C:\Users\PragyaJatav\Downloads\Untitled Folder 2\simulatorAldi\pages\8_Scenario_Planner.py
|
224 |
-
# # print("#" * 100)
|
225 |
|
226 |
# modified_spends = extract_number_for_string(
|
227 |
# st.session_state["total_spends_change_abs_slider"]
|
@@ -237,6 +309,7 @@ def update_sales():
|
|
237 |
|
238 |
|
239 |
def update_all_spends_abs():
|
|
|
240 |
if (
|
241 |
st.session_state["total_spends_change_abs"]
|
242 |
in st.session_state["total_spends_change_abs_slider_options"]
|
@@ -244,6 +317,7 @@ def update_all_spends_abs():
|
|
244 |
st.session_state["allow_spends_update"] = True
|
245 |
else:
|
246 |
st.session_state["allow_spends_update"] = False
|
|
|
247 |
|
248 |
actual_spends = _scenario.actual_total_spends
|
249 |
if (
|
@@ -417,7 +491,7 @@ def update_penalty():
|
|
417 |
|
418 |
|
419 |
def reset_scenario(panel_selected, file_selected, updated_rcs):
|
420 |
-
# ## print(st.session_state['default_scenario_dict'])
|
421 |
# st.session_state['scenario'] = class_from_dict(st.session_state['default_scenario_dict'])
|
422 |
# for channel in st.session_state['scenario'].channels.values():
|
423 |
# st.session_state[channel.name] = float(channel.actual_total_spends * channel.conversion_rate)
|
@@ -556,13 +630,13 @@ def calculate_rgba(
|
|
556 |
|
557 |
|
558 |
def debug_temp(x_test, power, K, b, a, x0):
|
559 |
-
# # print("*" * 100)
|
560 |
# Calculate the count of bins
|
561 |
count_lower_bin = sum(1 for x in x_test if x <= 2524)
|
562 |
count_center_bin = sum(1 for x in x_test if x > 2524 and x <= 3377)
|
563 |
count_ = sum(1 for x in x_test if x > 3377)
|
564 |
|
565 |
-
# # print(
|
566 |
# f"""
|
567 |
# lower : {count_lower_bin}
|
568 |
# center : {count_center_bin}
|
@@ -596,7 +670,7 @@ def plot_response_curves(summary_df_sorted):
|
|
596 |
'Audio',
|
597 |
'Email']
|
598 |
summary_df_sorted.index = summary_df_sorted["Channel_name"]
|
599 |
-
figures = [rc.response_curves(channels_list[i], summary_df_sorted["Optimized_spend"][channels_list[i]]
|
600 |
|
601 |
# for i in range()
|
602 |
|
@@ -647,14 +721,14 @@ def upload_file_prospects_calc(df):
|
|
647 |
params = pd.read_excel(r"response_curves_parameters.xlsx",index_col = "channel")
|
648 |
param_dicts = {col: params[col].to_dict() for col in params.columns}
|
649 |
df.index = df.channel
|
650 |
-
# # print(param_dicts)
|
651 |
for col in df.channel:
|
652 |
x = df["Spends"][col]
|
653 |
dividing_rate = 104
|
654 |
# st.write(x)
|
655 |
x_inp = ( x/dividing_rate- param_dicts["x_min"][col]) / (param_dicts["x_max"][col] - param_dicts["x_min"][col])
|
656 |
x_out = x_inp**param_dicts["n"][col] / (param_dicts["Kd"][col]**param_dicts["n"][col] + x_inp**param_dicts["n"][col]) #self.hill_equation(x_inp,Kd, n)
|
657 |
-
# # print("x_out",x_out)
|
658 |
|
659 |
|
660 |
x_val_inv = (x_out*param_dicts["x_max"][col] + (1 - x_out) * param_dicts["x_min"][col])
|
@@ -665,7 +739,7 @@ def upload_file_prospects_calc(df):
|
|
665 |
# x_out = x**param_dicts["n"][col]/(param_dicts["Kd"][col]**param_dicts["n"][col]+ x**param_dicts["n"][col])
|
666 |
# x_out_inv = (x_out*(param_dicts["y_max"][col]-param_dicts["y_min"][col])+param_dicts["y_min"][col])*104
|
667 |
df["Prospects"][col] = sales
|
668 |
-
# # print(df)
|
669 |
return df
|
670 |
|
671 |
|
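A compact sketch of the spend-to-prospects mapping used in upload_file_prospects_calc above (an illustration, not the committed code): total spend is averaged to a weekly level (dividing_rate = 104), min-max scaled with the channel's x_min/x_max from response_curves_parameters.xlsx, passed through the Hill equation, and scaled back. The final `sales` assignment is elided in the hunk, so this sketch stops at the inverse-scaled response.

    def weekly_hill_response(spend, x_min, x_max, Kd, n, weeks=104):
        # min-max scale the weekly spend, apply the Hill curve, then undo the scaling
        x_scaled = (spend / weeks - x_min) / (x_max - x_min)
        y = x_scaled**n / (Kd**n + x_scaled**n)
        return y * x_max + (1 - y) * x_min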
@@ -781,6 +855,105 @@ def reset_inputs():
|
|
781 |
|
782 |
st.session_state["initialized"] = False
|
783 |
|
784 |
def scenario_planner_plots():
|
785 |
with st.expander('Optimized Spends Overview'):
|
786 |
# if st.button('Refresh'):
|
@@ -823,11 +996,11 @@ def scenario_planner_plots():
|
|
823 |
light_red = 'rgba(250, 110, 10, 0.7)'
|
824 |
light_purple = 'rgba(255, 191, 69, 0.7)'
|
825 |
|
826 |
-
|
827 |
# Add actual vs optimized spend bars
|
828 |
|
829 |
|
830 |
-
|
831 |
text=summary_df_sorted['Actual_spend'].apply(format_number) + ' '
|
832 |
# +
|
833 |
# ' '+
|
@@ -836,62 +1009,63 @@ def scenario_planner_plots():
|
|
836 |
marker_color=light_blue))
|
837 |
|
838 |
|
839 |
-
|
840 |
text=summary_df_sorted['Optimized_spend'].apply(format_number) + ' '
|
841 |
# +
|
842 |
# '</br> (' + optimized_spend_percentage.astype(int).astype(str) + '%)'
|
843 |
,textposition='outside',#textfont=dict(size=30),
|
844 |
marker_color=light_orange))
|
845 |
|
846 |
-
|
847 |
-
|
848 |
-
|
849 |
title = "Actual vs. Optimized Spends",
|
850 |
margin=dict(t=40, b=40, l=40, r=40)
|
851 |
)
|
852 |
|
853 |
-
st.plotly_chart(
|
854 |
|
855 |
# Add actual vs optimized Contribution
|
856 |
-
|
857 |
-
|
858 |
name='Actual Contribution',text=summary_df_sorted['Old_sales'].apply(format_number),textposition='outside',
|
859 |
marker_color=light_blue,showlegend=True))
|
860 |
|
861 |
-
|
862 |
name='Optimized Contribution',text=summary_df_sorted['New_sales'].apply(format_number),textposition='outside',
|
863 |
marker_color=light_orange, showlegend=True))
|
864 |
|
865 |
|
866 |
|
867 |
-
|
868 |
-
|
869 |
-
|
870 |
title = "Actual vs. Optimized Contributions",
|
871 |
margin=dict(t=40, b=40, l=40, r=40)
|
872 |
# yaxis=dict(range=[0, 0.002]),
|
873 |
)
|
874 |
-
st.plotly_chart(
|
875 |
|
876 |
# Add actual vs optimized Efficiency bars
|
877 |
-
|
878 |
summary_df_sorted_p = summary_df_sorted[summary_df_sorted['Channel_name']!="Panel"]
|
879 |
-
|
880 |
name='Actual Efficiency', text=summary_df_sorted_p['old_efficiency'].apply(format_number) ,textposition='outside',
|
881 |
marker_color=light_blue,showlegend=True))
|
882 |
-
|
883 |
name='Optimized Efficiency',text=summary_df_sorted_p['new_efficiency'].apply(format_number),textposition='outside' ,
|
884 |
marker_color=light_orange,showlegend=True))
|
885 |
|
886 |
-
|
887 |
-
|
888 |
-
|
889 |
title = "Actual vs. Optimized Efficiency",
|
890 |
margin=dict(t=40, b=40, l=40, r=40),
|
891 |
# yaxis=dict(range=[0, 0.002]),
|
892 |
)
|
893 |
|
894 |
-
st.plotly_chart(
|
|
|
895 |
|
896 |
def give_demo():
|
897 |
def get_file_bytes(file_path):
|
@@ -1035,7 +1209,7 @@ if auth_status == True:
|
|
1035 |
# ========================== UI ========================== #
|
1036 |
# ======================================================== #
|
1037 |
|
1038 |
-
# # print(list(st.session_state.keys()))
|
1039 |
main_header = st.columns((2, 2))
|
1040 |
sub_header = st.columns((1, 1, 1, 1))
|
1041 |
_scenario = st.session_state["scenario"]
|
@@ -1071,7 +1245,7 @@ if auth_status == True:
|
|
1071 |
st.session_state["lower_bound_key"] = 10
|
1072 |
|
1073 |
if "upper_bound_key" not in st.session_state:
|
1074 |
-
st.session_state["upper_bound_key"] =
|
1075 |
|
1076 |
# st.write(_scenario.modified_total_sales)
|
1077 |
header_df = pd.DataFrame(index=["Actual","Simulated","Change","Percent Change"],columns=["Spends","Prospects"])
|
@@ -1377,12 +1551,9 @@ if auth_status == True:
|
|
1377 |
)
|
1378 |
|
1379 |
with _columns1[2]:
|
1380 |
-
#
|
1381 |
-
|
1382 |
-
|
1383 |
-
# args=(st.session_state["optimization_key"]),
|
1384 |
-
# use_container_width=True,
|
1385 |
-
# )
|
1386 |
|
1387 |
optimize_placeholder = st.empty()
|
1388 |
|
@@ -1399,6 +1570,24 @@ if auth_status == True:
|
|
1399 |
|
1400 |
_columns2 = st.columns((2, 2, 2,2))
|
1401 |
if st.session_state["optimization_key"] == "Media Spends":
|
|
|
1402 |
with _columns2[0]:
|
1403 |
spend_input = st.text_input(
|
1404 |
"Absolute",
|
@@ -1406,35 +1595,21 @@ if auth_status == True:
|
|
1406 |
# label_visibility="collapsed",
|
1407 |
on_change=update_all_spends_abs,
|
1408 |
)
|
1409 |
-
|
|
|
1410 |
with _columns2[1]:
|
1411 |
st.number_input(
|
1412 |
"Percent Change",
|
1413 |
key="total_spends_change",
|
1414 |
-
min_value
|
1415 |
-
max_value=
|
1416 |
-
step=
|
1417 |
value=0.00,
|
1418 |
on_change=update_spends,
|
1419 |
)
|
1420 |
|
1421 |
-
|
1422 |
-
|
1423 |
-
"Overall Lower Bound for Spends",
|
1424 |
-
value = 50
|
1425 |
-
# key = overall_lower_bound,
|
1426 |
-
# on_change=partial(update_data_bound_min_overall)
|
1427 |
-
)
|
1428 |
-
with _columns2[3]:
|
1429 |
-
overall_upper_bound = st.number_input(
|
1430 |
-
"Overall Upper Bound for Spends",
|
1431 |
-
value = 50
|
1432 |
-
# key = overall_upper_bound,
|
1433 |
-
# on_change=partial(update_data_bound_max_overall)
|
1434 |
-
)
|
1435 |
-
|
1436 |
-
min_value = round(_scenario.actual_total_spends * (1-overall_lower_bound/100))
|
1437 |
-
max_value = round(_scenario.actual_total_spends * (1-overall_upper_bound/100))
|
1438 |
st.session_state["total_spends_change_abs_slider_options"] = [
|
1439 |
numerize(value, 1)
|
1440 |
for value in range(min_value, max_value + 1, int(1e4))
|
@@ -1462,7 +1637,7 @@ if auth_status == True:
|
|
1462 |
key="total_sales_change",
|
1463 |
min_value=-50.00,
|
1464 |
max_value=50.00,
|
1465 |
-
step=
|
1466 |
|
1467 |
value=0.00,
|
1468 |
on_change=update_sales,
|
@@ -1578,8 +1753,9 @@ if auth_status == True:
|
|
1578 |
# st.write(channel_spends)
|
1579 |
# st.write(min_value)
|
1580 |
# st.write(max_value)
|
1581 |
-
### print(st.session_state[channel_name])
|
1582 |
-
|
|
|
1583 |
_columns_min = st.columns(2)
|
1584 |
with _columns_min[0]:
|
1585 |
spend_input = st.text_input(
|
@@ -1587,6 +1763,7 @@ if auth_status == True:
|
|
1587 |
key=channel_name,
|
1588 |
# label_visibility="collapsed",
|
1589 |
on_change=partial(update_data, channel_name),
|
|
|
1590 |
)
|
1591 |
channel_name_lower_bound = f"{channel_name}_lower_bound"
|
1592 |
|
@@ -1613,7 +1790,7 @@ if auth_status == True:
|
|
1613 |
channel_name_upper_bound = f"{channel_name}_upper_bound"
|
1614 |
|
1615 |
if channel_name_upper_bound not in st.session_state:
|
1616 |
-
st.session_state[channel_name_upper_bound] = str(
|
1617 |
|
1618 |
channel_bounds_max = st.text_input(
|
1619 |
"Upper Bound Percentage",
|
@@ -1842,7 +2019,7 @@ if auth_status == True:
|
|
1842 |
# current_channel_spends,
|
1843 |
# )
|
1844 |
|
1845 |
-
# # print(st.session_state["acutual_predicted"])
|
1846 |
summary_df = pd.DataFrame(st.session_state["acutual_predicted"])
|
1847 |
# st.dataframe(summary_df)
|
1848 |
summary_df.drop_duplicates(subset="Channel_name", keep="last", inplace=True)
|
@@ -2085,6 +2262,36 @@ if auth_status == True:
|
|
2085 |
disabled=len(st.session_state["scenario_input"]) == 0,#use_container_width=True
|
2086 |
|
2087 |
)
|
|
2088 |
|
2089 |
|
2090 |
|
@@ -2108,3 +2315,5 @@ if auth_status != True:
|
|
2108 |
st.error("Username not found")
|
2109 |
except Exception as e:
|
2110 |
st.error(e)
|
40 |
load_local_css("styles.css")
|
41 |
set_header()
|
42 |
|
43 |
+
from pptx import Presentation
|
44 |
+
from pptx.util import Inches
|
45 |
+
from io import BytesIO
|
46 |
+
import plotly.io as pio
|
47 |
+
|
48 |
|
49 |
for k, v in st.session_state.items():
|
50 |
if k not in ["logout", "login", "config"] and not k.startswith("FormSubmitter"):
|
53 |
# ======================= Functions ====================== #
|
54 |
# ======================================================== #
|
55 |
|
56 |
+
|
57 |
+
def save_ppt_file(summary_df_sorted,fig1,fig2,fig3):
|
58 |
+
summary_df_sorted.index = summary_df_sorted["Channel_name"]
|
59 |
+
# Initialize PowerPoint presentation
|
60 |
+
prs = Presentation()
|
61 |
+
# Helper function to add Plotly figure to slide
|
62 |
+
def add_plotly_chart_to_slide(slide, fig, left, top, width, height):
|
63 |
+
img_stream = BytesIO()
|
64 |
+
pio.write_image(fig, img_stream, format='png',engine="orca")
|
65 |
+
slide.shapes.add_picture(img_stream, left, top, width, height)
|
66 |
+
|
67 |
+
for i in range(0,len(channels_list)):
|
68 |
+
# print(channels_list[i])
|
69 |
+
slide_1 = prs.slides.add_slide(prs.slide_layouts[6])
|
70 |
+
fig = rc.response_curves(channels_list[i], summary_df_sorted["Optimized_spend"][channels_list[i]], summary_df_sorted["New_sales"][channels_list[i]])
|
71 |
+
add_plotly_chart_to_slide(slide_1, fig, Inches(0.1), Inches(0.1), width=Inches(9), height=Inches(7))
|
72 |
+
|
73 |
+
# Update layout
|
74 |
+
fig1.update_layout(
|
75 |
+
legend=dict(
|
76 |
+
orientation="h", # Horizontal orientation
|
77 |
+
yanchor="top", # Anchor the legend at the top
|
78 |
+
y=-0.4, # Position the legend below the plot area
|
79 |
+
xanchor="center", # Center the legend horizontally
|
80 |
+
x=0.5 # Center the legend on the x-axis
|
81 |
+
)
|
82 |
+
)
|
83 |
+
# Update layout
|
84 |
+
fig2.update_layout(
|
85 |
+
legend=dict(
|
86 |
+
orientation="h", # Horizontal orientation
|
87 |
+
yanchor="top", # Anchor the legend at the top
|
88 |
+
y=-0.4, # Position the legend below the plot area
|
89 |
+
xanchor="center", # Center the legend horizontally
|
90 |
+
x=0.5 # Center the legend on the x-axis
|
91 |
+
)
|
92 |
+
)
|
93 |
+
# Update layout
|
94 |
+
fig3.update_layout(
|
95 |
+
legend=dict(
|
96 |
+
orientation="h", # Horizontal orientation
|
97 |
+
yanchor="top", # Anchor the legend at the top
|
98 |
+
y=-0.4, # Position the legend below the plot area
|
99 |
+
xanchor="center", # Center the legend horizontally
|
100 |
+
x=0.5 # Center the legend on the x-axis
|
101 |
+
)
|
102 |
+
)
|
103 |
+
|
104 |
+
|
105 |
+
|
106 |
+
slide_1 = prs.slides.add_slide(prs.slide_layouts[6])
|
107 |
+
|
108 |
+
add_plotly_chart_to_slide(slide_1, fig1, Inches(0.1), Inches(1), width=Inches(9.5), height=Inches(6))
|
109 |
+
slide_1 = prs.slides.add_slide(prs.slide_layouts[6])
|
110 |
+
add_plotly_chart_to_slide(slide_1, fig2, Inches(0.1), Inches(1), width=Inches(9.5), height=Inches(6))
|
111 |
+
slide_1 = prs.slides.add_slide(prs.slide_layouts[6])
|
112 |
+
add_plotly_chart_to_slide(slide_1, fig3, Inches(0.1), Inches(1), width=Inches(9.5), height=Inches(6))
|
113 |
+
|
114 |
+
|
115 |
+
|
116 |
+
# Save to a BytesIO object
|
117 |
+
ppt_stream = BytesIO()
|
118 |
+
prs.save(ppt_stream)
|
119 |
+
ppt_stream.seek(0)
|
120 |
+
|
121 |
+
return ppt_stream.getvalue()
|
122 |
+
|
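Hypothetical usage of save_ppt_file as defined above, mirroring the download button added near the end of this file (names such as summary_df_sorted are assumed to already exist in session scope):

    fig1, fig2, fig3 = scenario_planner_plots2()
    ppt_bytes = save_ppt_file(summary_df_sorted, fig1, fig2, fig3)
    st.download_button(
        label="Download Scenario Overview",
        data=ppt_bytes,
        file_name="MMM_Scenario_Planner_Presentation.pptx",
        mime="application/vnd.openxmlformats-officedocument.presentationml.presentation",
    )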
123 |
def first_day_of_next_year(date):
|
124 |
next_year = date.year + 1
|
125 |
first_day = datetime(next_year, 1, 1).date()
|
|
|
163 |
if key.lower() == "media spends":
|
164 |
with status_placeholder:
|
165 |
with st.spinner("Optimizing"):
|
166 |
+
# # # print(channel_list)
|
167 |
+
# # # print(st.session_state["total_spends_change"])
|
168 |
result = st.session_state["scenario"].optimize(
|
169 |
st.session_state["total_spends_change"], channel_list
|
170 |
# result = st.session_state["scenario"].spends_optimisation(
|
171 |
# st.session_state["total_spends_change"], channel_list
|
172 |
)
|
173 |
+
# print("")
|
174 |
+
# print(list(zip(*result)))
|
175 |
|
176 |
|
177 |
|
|
|
214 |
st.session_state["scenario"]
|
215 |
)
|
216 |
st.session_state["scenario_input"] = ""
|
217 |
+
# # # print(type(st.session_state['saved_scenarios']))
|
218 |
with open("../saved_scenarios.pkl", "wb") as f:
|
219 |
pickle.dump(st.session_state["saved_scenarios"], f)
|
220 |
|
|
|
291 |
# def update_all_spends_abs_slider():
|
292 |
# actual_spends = _scenario.actual_total_spends
|
293 |
# if validate_input(st.session_state["total_spends_change_abs_slider"]):
|
294 |
+
# # # print("#" * 100)
|
295 |
+
# # # print(st.session_state["total_spends_change_abs_slider"])C:\Users\PragyaJatav\Downloads\Untitled Folder 2\simulatorAldi\pages\8_Scenario_Planner.py
|
296 |
+
# # # print("#" * 100)
|
297 |
|
298 |
# modified_spends = extract_number_for_string(
|
299 |
# st.session_state["total_spends_change_abs_slider"]
|
|
|
309 |
|
310 |
|
311 |
def update_all_spends_abs():
|
312 |
+
st.write("aon update spends abs")
|
313 |
if (
|
314 |
st.session_state["total_spends_change_abs"]
|
315 |
in st.session_state["total_spends_change_abs_slider_options"]
|
|
|
317 |
st.session_state["allow_spends_update"] = True
|
318 |
else:
|
319 |
st.session_state["allow_spends_update"] = False
|
320 |
+
# st.warning("Invalid Input")
|
321 |
|
322 |
actual_spends = _scenario.actual_total_spends
|
323 |
if (
|
|
|
491 |
|
492 |
|
493 |
def reset_scenario(panel_selected, file_selected, updated_rcs):
|
494 |
+
# ## # print(st.session_state['default_scenario_dict'])
|
495 |
# st.session_state['scenario'] = class_from_dict(st.session_state['default_scenario_dict'])
|
496 |
# for channel in st.session_state['scenario'].channels.values():
|
497 |
# st.session_state[channel.name] = float(channel.actual_total_spends * channel.conversion_rate)
|
|
|
630 |
|
631 |
|
632 |
def debug_temp(x_test, power, K, b, a, x0):
|
633 |
+
# # # print("*" * 100)
|
634 |
# Calculate the count of bins
|
635 |
count_lower_bin = sum(1 for x in x_test if x <= 2524)
|
636 |
count_center_bin = sum(1 for x in x_test if x > 2524 and x <= 3377)
|
637 |
count_ = sum(1 for x in x_test if x > 3377)
|
638 |
|
639 |
+
# # # print(
|
640 |
# f"""
|
641 |
# lower : {count_lower_bin}
|
642 |
# center : {count_center_bin}
|
|
|
670 |
'Audio',
|
671 |
'Email']
|
672 |
summary_df_sorted.index = summary_df_sorted["Channel_name"]
|
673 |
+
figures = [rc.response_curves(channels_list[i], summary_df_sorted["Optimized_spend"][channels_list[i]], summary_df_sorted["New_sales"][channels_list[i]]) for i in range(13)]
|
674 |
|
675 |
# for i in range()
|
676 |
|
|
|
721 |
params = pd.read_excel(r"response_curves_parameters.xlsx",index_col = "channel")
|
722 |
param_dicts = {col: params[col].to_dict() for col in params.columns}
|
723 |
df.index = df.channel
|
724 |
+
# # # print(param_dicts)
|
725 |
for col in df.channel:
|
726 |
x = df["Spends"][col]
|
727 |
dividing_rate = 104
|
728 |
# st.write(x)
|
729 |
x_inp = ( x/dividing_rate- param_dicts["x_min"][col]) / (param_dicts["x_max"][col] - param_dicts["x_min"][col])
|
730 |
x_out = x_inp**param_dicts["n"][col] / (param_dicts["Kd"][col]**param_dicts["n"][col] + x_inp**param_dicts["n"][col]) #self.hill_equation(x_inp,Kd, n)
|
731 |
+
# # # print("x_out",x_out)
|
732 |
|
733 |
|
734 |
x_val_inv = (x_out*param_dicts["x_max"][col] + (1 - x_out) * param_dicts["x_min"][col])
|
|
|
739 |
# x_out = x**param_dicts["n"][col]/(param_dicts["Kd"][col]**param_dicts["n"][col]+ x**param_dicts["n"][col])
|
740 |
# x_out_inv = (x_out*(param_dicts["y_max"][col]-param_dicts["y_min"][col])+param_dicts["y_min"][col])*104
|
741 |
df["Prospects"][col] = sales
|
742 |
+
# # # print(df)
|
743 |
return df
|
744 |
|
745 |
|
|
|
855 |
|
856 |
st.session_state["initialized"] = False
|
857 |
|
858 |
+
def scenario_planner_plots2():
|
859 |
+
import plotly.graph_objects as go
|
860 |
+
from plotly.subplots import make_subplots
|
861 |
+
|
862 |
+
|
863 |
+
with open('summary_df.pkl', 'rb') as file:
|
864 |
+
summary_df_sorted = pickle.load(file)
|
865 |
+
#st.write(summary_df_sorted)
|
866 |
+
|
867 |
+
# selected_scenario= st.selectbox('Select Saved Scenarios',['S1','S2'])
|
868 |
+
summary_df_sorted=summary_df_sorted.sort_values(by=['Optimized_spend'],ascending=False)
|
869 |
+
summary_df_sorted['old_efficiency']=(summary_df_sorted['Old_sales']/summary_df_sorted['Old_sales'].sum())/(summary_df_sorted['Actual_spend']/summary_df_sorted['Actual_spend'].sum())
|
870 |
+
summary_df_sorted['new_efficiency']=(summary_df_sorted['New_sales']/summary_df_sorted['New_sales'].sum())/(summary_df_sorted['Optimized_spend']/summary_df_sorted['Optimized_spend'].sum())
|
871 |
+
summary_df_sorted['old_roi']=summary_df_sorted['Old_sales']/summary_df_sorted['Actual_spend']
|
872 |
+
summary_df_sorted['new_roi']=summary_df_sorted['New_sales']/summary_df_sorted['Optimized_spend']
|
873 |
+
|
874 |
+
total_actual_spend = summary_df_sorted['Actual_spend'].sum()
|
875 |
+
total_optimized_spend = summary_df_sorted['Optimized_spend'].sum()
|
876 |
+
actual_spend_percentage = (summary_df_sorted['Actual_spend'] / total_actual_spend) * 100
|
877 |
+
optimized_spend_percentage = (summary_df_sorted['Optimized_spend'] / total_optimized_spend) * 100
|
878 |
+
|
879 |
+
|
880 |
+
|
881 |
+
light_blue = 'rgba(0, 31, 120, 0.7)'
|
882 |
+
light_orange = 'rgba(0, 181, 219, 0.7)'
|
883 |
+
light_green = 'rgba(240, 61, 20, 0.7)'
|
884 |
+
light_red = 'rgba(250, 110, 10, 0.7)'
|
885 |
+
light_purple = 'rgba(255, 191, 69, 0.7)'
|
886 |
+
|
887 |
+
fig1 = go.Figure()
|
888 |
+
# Add actual vs optimized spend bars
|
889 |
+
|
890 |
+
|
891 |
+
fig1.add_trace(go.Bar(x=summary_df_sorted['Channel_name'].apply(channel_name_formating), y=summary_df_sorted['Actual_spend'], name='Actual',
|
892 |
+
text=summary_df_sorted['Actual_spend'].apply(format_number) + ' '
|
893 |
+
# +
|
894 |
+
# ' '+
|
895 |
+
# '</br> (' + actual_spend_percentage.astype(int).astype(str) + '%)'
|
896 |
+
,textposition='outside',#textfont=dict(size=30),
|
897 |
+
marker_color=light_blue))
|
898 |
+
|
899 |
+
|
900 |
+
fig1.add_trace(go.Bar(x=summary_df_sorted['Channel_name'].apply(channel_name_formating), y=summary_df_sorted['Optimized_spend'], name='Optimized',
|
901 |
+
text=summary_df_sorted['Optimized_spend'].apply(format_number) + ' '
|
902 |
+
# +
|
903 |
+
# '</br> (' + optimized_spend_percentage.astype(int).astype(str) + '%)'
|
904 |
+
,textposition='outside',#textfont=dict(size=30),
|
905 |
+
marker_color=light_orange))
|
906 |
+
|
907 |
+
fig1.update_xaxes(title_text="Channels")
|
908 |
+
fig1.update_yaxes(title_text="Spends ($)")
|
909 |
+
fig1.update_layout(
|
910 |
+
title = "Actual vs. Optimized Spends",
|
911 |
+
margin=dict(t=40, b=40, l=40, r=40)
|
912 |
+
)
|
913 |
+
|
914 |
+
# st.plotly_chart(fig1,use_container_width=True)
|
915 |
+
|
916 |
+
# Add actual vs optimized Contribution
|
917 |
+
fig2 = go.Figure()
|
918 |
+
fig2.add_trace(go.Bar(x=summary_df_sorted['Channel_name'].apply(channel_name_formating), y=summary_df_sorted['Old_sales'],
|
919 |
+
name='Actual Contribution',text=summary_df_sorted['Old_sales'].apply(format_number),textposition='outside',
|
920 |
+
marker_color=light_blue,showlegend=True))
|
921 |
+
|
922 |
+
fig2.add_trace(go.Bar(x=summary_df_sorted['Channel_name'].apply(channel_name_formating), y=summary_df_sorted['New_sales'],
|
923 |
+
name='Optimized Contribution',text=summary_df_sorted['New_sales'].apply(format_number),textposition='outside',
|
924 |
+
marker_color=light_orange, showlegend=True))
|
925 |
+
|
926 |
+
|
927 |
+
|
928 |
+
fig2.update_yaxes(title_text="Contribution")
|
929 |
+
fig2.update_xaxes(title_text="Channels")
|
930 |
+
fig2.update_layout(
|
931 |
+
title = "Actual vs. Optimized Contributions",
|
932 |
+
margin=dict(t=40, b=40, l=40, r=40)
|
933 |
+
# yaxis=dict(range=[0, 0.002]),
|
934 |
+
)
|
935 |
+
# st.plotly_chart(fig2,use_container_width=True)
|
936 |
+
|
937 |
+
# Add actual vs optimized Efficiency bars
|
938 |
+
fig3 = go.Figure()
|
939 |
+
summary_df_sorted_p = summary_df_sorted[summary_df_sorted['Channel_name']!="Panel"]
|
940 |
+
fig3.add_trace(go.Bar(x=summary_df_sorted_p['Channel_name'].apply(channel_name_formating), y=summary_df_sorted_p['old_efficiency'],
|
941 |
+
name='Actual Efficiency', text=summary_df_sorted_p['old_efficiency'].apply(format_number) ,textposition='outside',
|
942 |
+
marker_color=light_blue,showlegend=True))
|
943 |
+
fig3.add_trace(go.Bar(x=summary_df_sorted_p['Channel_name'].apply(channel_name_formating), y=summary_df_sorted_p['new_efficiency'],
|
944 |
+
name='Optimized Efficiency',text=summary_df_sorted_p['new_efficiency'].apply(format_number),textposition='outside' ,
|
945 |
+
marker_color=light_orange,showlegend=True))
|
946 |
+
|
947 |
+
fig3.update_xaxes(title_text="Channels")
|
948 |
+
fig3.update_yaxes(title_text="Efficiency")
|
949 |
+
fig3.update_layout(
|
950 |
+
title = "Actual vs. Optimized Efficiency",
|
951 |
+
margin=dict(t=40, b=40, l=40, r=40),
|
952 |
+
# yaxis=dict(range=[0, 0.002]),
|
953 |
+
)
|
954 |
+
|
955 |
+
# st.plotly_chart(fig3,use_container_width=True)
|
956 |
+
return fig1,fig2,fig3
|
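The old_efficiency and new_efficiency columns built in scenario_planner_plots2 are each channel's share of contribution divided by its share of spend, so a value above 1 means the channel over-delivers relative to its budget. A minimal worked example of the same calculation:

    import pandas as pd

    df = pd.DataFrame({"Actual_spend": [100.0, 300.0], "Old_sales": [40.0, 60.0]})
    share_of_sales = df["Old_sales"] / df["Old_sales"].sum()        # 0.4, 0.6
    share_of_spend = df["Actual_spend"] / df["Actual_spend"].sum()  # 0.25, 0.75
    df["old_efficiency"] = share_of_sales / share_of_spend          # 1.6, 0.8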
957 |
def scenario_planner_plots():
|
958 |
with st.expander('Optimized Spends Overview'):
|
959 |
# if st.button('Refresh'):
|
|
|
996 |
light_red = 'rgba(250, 110, 10, 0.7)'
|
997 |
light_purple = 'rgba(255, 191, 69, 0.7)'
|
998 |
|
999 |
+
fig1 = go.Figure()
|
1000 |
# Add actual vs optimized spend bars
|
1001 |
|
1002 |
|
1003 |
+
fig1.add_trace(go.Bar(x=summary_df_sorted['Channel_name'].apply(channel_name_formating), y=summary_df_sorted['Actual_spend'], name='Actual',
|
1004 |
text=summary_df_sorted['Actual_spend'].apply(format_number) + ' '
|
1005 |
# +
|
1006 |
# ' '+
|
|
|
1009 |
marker_color=light_blue))
|
1010 |
|
1011 |
|
1012 |
+
fig1.add_trace(go.Bar(x=summary_df_sorted['Channel_name'].apply(channel_name_formating), y=summary_df_sorted['Optimized_spend'], name='Optimized',
|
1013 |
text=summary_df_sorted['Optimized_spend'].apply(format_number) + ' '
|
1014 |
# +
|
1015 |
# '</br> (' + optimized_spend_percentage.astype(int).astype(str) + '%)'
|
1016 |
,textposition='outside',#textfont=dict(size=30),
|
1017 |
marker_color=light_orange))
|
1018 |
|
1019 |
+
fig1.update_xaxes(title_text="Channels")
|
1020 |
+
fig1.update_yaxes(title_text="Spends ($)")
|
1021 |
+
fig1.update_layout(
|
1022 |
title = "Actual vs. Optimized Spends",
|
1023 |
margin=dict(t=40, b=40, l=40, r=40)
|
1024 |
)
|
1025 |
|
1026 |
+
st.plotly_chart(fig1,use_container_width=True)
|
1027 |
|
1028 |
# Add actual vs optimized Contribution
|
1029 |
+
fig2 = go.Figure()
|
1030 |
+
fig2.add_trace(go.Bar(x=summary_df_sorted['Channel_name'].apply(channel_name_formating), y=summary_df_sorted['Old_sales'],
|
1031 |
name='Actual Contribution',text=summary_df_sorted['Old_sales'].apply(format_number),textposition='outside',
|
1032 |
marker_color=light_blue,showlegend=True))
|
1033 |
|
1034 |
+
fig2.add_trace(go.Bar(x=summary_df_sorted['Channel_name'].apply(channel_name_formating), y=summary_df_sorted['New_sales'],
|
1035 |
name='Optimized Contribution',text=summary_df_sorted['New_sales'].apply(format_number),textposition='outside',
|
1036 |
marker_color=light_orange, showlegend=True))
|
1037 |
|
1038 |
|
1039 |
|
1040 |
+
fig2.update_yaxes(title_text="Contribution")
|
1041 |
+
fig2.update_xaxes(title_text="Channels")
|
1042 |
+
fig2.update_layout(
|
1043 |
title = "Actual vs. Optimized Contributions",
|
1044 |
margin=dict(t=40, b=40, l=40, r=40)
|
1045 |
# yaxis=dict(range=[0, 0.002]),
|
1046 |
)
|
1047 |
+
st.plotly_chart(fig2,use_container_width=True)
|
1048 |
|
1049 |
# Add actual vs optimized Efficiency bars
|
1050 |
+
fig3 = go.Figure()
|
1051 |
summary_df_sorted_p = summary_df_sorted[summary_df_sorted['Channel_name']!="Panel"]
|
1052 |
+
fig3.add_trace(go.Bar(x=summary_df_sorted_p['Channel_name'].apply(channel_name_formating), y=summary_df_sorted_p['old_efficiency'],
|
1053 |
name='Actual Efficiency', text=summary_df_sorted_p['old_efficiency'].apply(format_number) ,textposition='outside',
|
1054 |
marker_color=light_blue,showlegend=True))
|
1055 |
+
fig3.add_trace(go.Bar(x=summary_df_sorted_p['Channel_name'].apply(channel_name_formating), y=summary_df_sorted_p['new_efficiency'],
|
1056 |
name='Optimized Efficiency',text=summary_df_sorted_p['new_efficiency'].apply(format_number),textposition='outside' ,
|
1057 |
marker_color=light_orange,showlegend=True))
|
1058 |
|
1059 |
+
fig3.update_xaxes(title_text="Channels")
|
1060 |
+
fig3.update_yaxes(title_text="Efficiency")
|
1061 |
+
fig3.update_layout(
|
1062 |
title = "Actual vs. Optimized Efficiency",
|
1063 |
margin=dict(t=40, b=40, l=40, r=40),
|
1064 |
# yaxis=dict(range=[0, 0.002]),
|
1065 |
)
|
1066 |
|
1067 |
+
st.plotly_chart(fig3,use_container_width=True)
|
1068 |
+
return fig1,fig2,fig3
|
1069 |
|
1070 |
def give_demo():
|
1071 |
def get_file_bytes(file_path):
|
|
|
1209 |
# ========================== UI ========================== #
|
1210 |
# ======================================================== #
|
1211 |
|
1212 |
+
# # # print(list(st.session_state.keys()))
|
1213 |
main_header = st.columns((2, 2))
|
1214 |
sub_header = st.columns((1, 1, 1, 1))
|
1215 |
_scenario = st.session_state["scenario"]
|
|
|
1245 |
st.session_state["lower_bound_key"] = 10
|
1246 |
|
1247 |
if "upper_bound_key" not in st.session_state:
|
1248 |
+
st.session_state["upper_bound_key"] = 100
|
1249 |
|
1250 |
# st.write(_scenario.modified_total_sales)
|
1251 |
header_df = pd.DataFrame(index=["Actual","Simulated","Change","Percent Change"],columns=["Spends","Prospects"])
|
|
|
1551 |
)
|
1552 |
|
1553 |
with _columns1[2]:
|
1554 |
+
#
|
1555 |
+
|
1556 |
+
|
1557 |
|
1558 |
optimize_placeholder = st.empty()
|
1559 |
|
|
|
1570 |
|
1571 |
_columns2 = st.columns((2, 2, 2,2))
|
1572 |
if st.session_state["optimization_key"] == "Media Spends":
|
1573 |
+
# st.write(overall_lower_bound,overall_upper_bound)
|
1574 |
+
|
1575 |
+
with _columns2[2]:
|
1576 |
+
overall_lower_bound = st.number_input(
|
1577 |
+
"Overall Lower Bound for Spends",
|
1578 |
+
value = 50.0,
|
1579 |
+
key = "overall_lower_bound",
|
1580 |
+
# on_change=partial(update_data_bound_min_overall)
|
1581 |
+
)
|
1582 |
+
with _columns2[3]:
|
1583 |
+
overall_upper_bound = st.number_input(
|
1584 |
+
"Overall Upper Bound for Spends",
|
1585 |
+
value = 50.0,
|
1586 |
+
key = "overall_upper_bound",
|
1587 |
+
# on_change=partial(update_data_bound_max_overall)
|
1588 |
+
)
|
1589 |
+
min_value = round(_scenario.actual_total_spends * (1-overall_lower_bound/100))
|
1590 |
+
max_value = round(_scenario.actual_total_spends * (1-overall_upper_bound/100))
|
1591 |
with _columns2[0]:
|
1592 |
spend_input = st.text_input(
|
1593 |
"Absolute",
|
|
|
1595 |
# label_visibility="collapsed",
|
1596 |
on_change=update_all_spends_abs,
|
1597 |
)
|
1598 |
+
# overall_lower_bound = 50.0
|
1599 |
+
# overall_upper_bound = 50.0
|
1600 |
with _columns2[1]:
|
1601 |
st.number_input(
|
1602 |
"Percent Change",
|
1603 |
key="total_spends_change",
|
1604 |
+
min_value= -1*overall_lower_bound,
|
1605 |
+
max_value= overall_upper_bound,
|
1606 |
+
step=0.01,
|
1607 |
value=0.00,
|
1608 |
on_change=update_spends,
|
1609 |
)
|
1610 |
|
1611 |
+
|
1612 |
+
|
1613 |
st.session_state["total_spends_change_abs_slider_options"] = [
|
1614 |
numerize(value, 1)
|
1615 |
for value in range(min_value, max_value + 1, int(1e4))
|
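Note that, as committed, both bounds subtract from actual spend: min_value uses (1 - overall_lower_bound/100) and max_value uses (1 - overall_upper_bound/100), so with the default 50/50 inputs the two values coincide and the range feeding the slider options above is empty. A hedged reading of the intended calculation (an assumption, not part of the commit):

    min_value = round(_scenario.actual_total_spends * (1 - overall_lower_bound / 100))
    max_value = round(_scenario.actual_total_spends * (1 + overall_upper_bound / 100))
    # numerize(value, 1) then renders each step as a compact label (e.g. "1.5M")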
|
|
1637 |
key="total_sales_change",
|
1638 |
min_value=-50.00,
|
1639 |
max_value=50.00,
|
1640 |
+
step=0.01,
|
1641 |
|
1642 |
value=0.00,
|
1643 |
on_change=update_sales,
|
|
|
1753 |
# st.write(channel_spends)
|
1754 |
# st.write(min_value)
|
1755 |
# st.write(max_value)
|
1756 |
+
### # print(st.session_state[channel_name])
|
1757 |
+
# st.write(_channel_class.channel_bounds_min,channel_bounds_min)
|
1758 |
+
# st.write(_channel_class.channel_bounds_max,channel_bounds_max)
|
1759 |
_columns_min = st.columns(2)
|
1760 |
with _columns_min[0]:
|
1761 |
spend_input = st.text_input(
|
|
|
1763 |
key=channel_name,
|
1764 |
# label_visibility="collapsed",
|
1765 |
on_change=partial(update_data, channel_name),
|
1766 |
+
|
1767 |
)
|
1768 |
channel_name_lower_bound = f"{channel_name}_lower_bound"
|
1769 |
|
|
|
1790 |
channel_name_upper_bound = f"{channel_name}_upper_bound"
|
1791 |
|
1792 |
if channel_name_upper_bound not in st.session_state:
|
1793 |
+
st.session_state[channel_name_upper_bound] = str(100)
|
1794 |
|
1795 |
channel_bounds_max = st.text_input(
|
1796 |
"Upper Bound Percentage",
|
|
|
2019 |
# current_channel_spends,
|
2020 |
# )
|
2021 |
|
2022 |
+
# # # print(st.session_state["acutual_predicted"])
|
2023 |
summary_df = pd.DataFrame(st.session_state["acutual_predicted"])
|
2024 |
# st.dataframe(summary_df)
|
2025 |
summary_df.drop_duplicates(subset="Channel_name", keep="last", inplace=True)
|
|
|
2262 |
disabled=len(st.session_state["scenario_input"]) == 0,#use_container_width=True
|
2263 |
|
2264 |
)
|
2265 |
+
# def prepare_download_func():
|
2266 |
+
|
2267 |
+
# fig1,fig2,fig3 = scenario_planner_plots()
|
2268 |
+
|
2269 |
+
# ppt_file = save_ppt_file(summary_df_sorted,fig1,fig2,fig3)
|
2270 |
+
|
2271 |
+
if st.button("Prepare Analysis Download"):
|
2272 |
+
fig1,fig2,fig3 = scenario_planner_plots2()
|
2273 |
+
ppt_file = save_ppt_file(summary_df_sorted,fig1,fig2,fig3)
|
2274 |
+
# Add a download button
|
2275 |
+
try:
|
2276 |
+
# ppt_file = prepare_download_func()
|
2277 |
+
st.download_button(
|
2278 |
+
label="Download Response Curves And Optimised Spends Overview",
|
2279 |
+
data=ppt_file,
|
2280 |
+
file_name="MMM_Scenario_Planner_Presentation.pptx",
|
2281 |
+
mime="application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
2282 |
+
)
|
2283 |
+
except:
|
2284 |
+
st.write("")
|
2285 |
+
# ppt_file = save_ppt_file()
|
2286 |
+
# # Add a download button
|
2287 |
+
# st.download_button(
|
2288 |
+
# label="Download Analysis",
|
2289 |
+
# data=ppt_file,
|
2290 |
+
# file_name="MMM_Model_Quality_Presentation.pptx",
|
2291 |
+
# mime="application/vnd.openxmlformats-officedocument.presentationml.presentation"
|
2292 |
+
# )
|
2293 |
+
|
2294 |
+
|
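The bare except in the block above silently swallows any export failure (for example, a missing Orca/Kaleido image engine). A hypothetical variant that surfaces the reason instead, reusing the same arguments from the commit:

    try:
        st.download_button(
            label="Download Response Curves And Optimised Spends Overview",
            data=ppt_file,
            file_name="MMM_Scenario_Planner_Presentation.pptx",
            mime="application/vnd.openxmlformats-officedocument.presentationml.presentation",
        )
    except Exception as e:
        st.warning(f"Could not prepare the download: {e}")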
2295 |
|
2296 |
|
2297 |
|
|
|
2315 |
st.error("Username not found")
|
2316 |
except Exception as e:
|
2317 |
st.error(e)
|
2318 |
+
|
2319 |
+
|
pages/3_Saved_Scenarios.py
CHANGED
@@ -16,6 +16,11 @@ from yaml import SafeLoader
|
|
16 |
from classes import class_from_dict
|
17 |
import plotly.graph_objects as go
|
18 |
|
19 |
st.set_page_config(layout='wide')
|
20 |
load_local_css('styles.css')
|
21 |
set_header()
|
@@ -24,6 +29,9 @@ st.title("Saved Scenarios")
|
|
24 |
# for k, v in st.session_state.items():
|
25 |
# if k not in ['logout', 'login','config'] and not k.startswith('FormSubmitter'):
|
26 |
# st.session_state[k] = v
|
27 |
def comparison_scenarios_df():
|
28 |
|
29 |
## create summary page
|
@@ -32,7 +40,7 @@ def comparison_scenarios_df():
|
|
32 |
summary_df_spend = None
|
33 |
summary_df_prospect = None
|
34 |
# summary_df_efficiency = None
|
35 |
-
#=# print(scenarios_to_download)
|
36 |
for scenario_name in scenarios_to_compare:
|
37 |
scenario_dict = st.session_state['saved_scenarios'][scenario_name]
|
38 |
_spends = []
|
@@ -96,14 +104,65 @@ def comparison_scenarios_df():
|
|
96 |
efficiency_df[c] = efficiency_df[c].round(2)
|
97 |
|
98 |
return summary_df_spend,summary_df_prospect,efficiency_df
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
def plot_comparison_chart(df,metric):
|
|
103 |
|
|
104 |
# Create traces for each column
|
105 |
traces = []
|
106 |
-
for column in df.columns:
|
|
|
|
|
107 |
traces.append(go.Bar(
|
108 |
x=df.index,
|
109 |
y=df[column],
|
@@ -111,6 +170,7 @@ def plot_comparison_chart(df,metric):
|
|
111 |
text=df[column].apply(numerize), # Adding text for each point
|
112 |
textposition='auto',
|
113 |
hoverinfo='x+y+text',
|
|
|
114 |
))
|
115 |
|
116 |
# Create the layout
|
@@ -124,16 +184,79 @@ def plot_comparison_chart(df,metric):
|
|
124 |
# Create the figure
|
125 |
fig = go.Figure(data=traces, layout=layout)
|
126 |
|
|
127 |
return fig
|
128 |
|
|
129 |
def create_comparison_plots():
|
130 |
# comparison_scenarios_df()
|
131 |
spends_df, prospects_df, efficiency_df = comparison_scenarios_df()
|
132 |
# st.dataframe(spends_df)
|
133 |
-
|
134 |
-
|
135 |
-
|
|
136 |
|
|
137 |
def create_scenario_summary(scenario_dict):
|
138 |
summary_rows = []
|
139 |
actual_total_spends = scenario_dict.get('actual_total_spends'),
|
@@ -295,7 +418,7 @@ def download_scenarios():
|
|
295 |
wb.remove(wb.active)
|
296 |
st.session_state['xlsx_buffer'] = io.BytesIO()
|
297 |
summary_df = None
|
298 |
-
## print(scenarios_to_download)
|
299 |
for scenario_name in scenarios_to_download:
|
300 |
scenario_dict = st.session_state['saved_scenarios'][scenario_name]
|
301 |
_spends = []
|
@@ -385,7 +508,7 @@ auth_status = st.session_state.get('authentication_status')
|
|
385 |
if auth_status == True:
|
386 |
is_state_initiaized = st.session_state.get('initialized',False)
|
387 |
if not is_state_initiaized:
|
388 |
-
## print("Scenario page state reloaded")
|
389 |
initialize_data(target_file = "Overview_data_test_panel@#prospects.xlsx")
|
390 |
|
391 |
|
|
|
16 |
from classes import class_from_dict
|
17 |
import plotly.graph_objects as go
|
18 |
|
19 |
+
from pptx import Presentation
|
20 |
+
from pptx.util import Inches
|
21 |
+
from io import BytesIO
|
22 |
+
import plotly.io as pio
|
23 |
+
|
24 |
st.set_page_config(layout='wide')
|
25 |
load_local_css('styles.css')
|
26 |
set_header()
|
|
|
29 |
# for k, v in st.session_state.items():
|
30 |
# if k not in ['logout', 'login','config'] and not k.startswith('FormSubmitter'):
|
31 |
# st.session_state[k] = v
|
32 |
+
|
33 |
+
|
34 |
+
|
35 |
def comparison_scenarios_df():
|
36 |
|
37 |
## create summary page
|
|
|
40 |
summary_df_spend = None
|
41 |
summary_df_prospect = None
|
42 |
# summary_df_efficiency = None
|
43 |
+
#=# # print(scenarios_to_download)
|
44 |
for scenario_name in scenarios_to_compare:
|
45 |
scenario_dict = st.session_state['saved_scenarios'][scenario_name]
|
46 |
_spends = []
|
|
|
104 |
efficiency_df[c] = efficiency_df[c].round(2)
|
105 |
|
106 |
return summary_df_spend,summary_df_prospect,efficiency_df
|
107 |
+
import matplotlib.colors as mcolors
|
108 |
+
import plotly.colors as pc
|
109 |
+
|
110 |
+
def rgb_to_hex(rgb):
|
111 |
+
"""Convert RGB tuple to hex color."""
|
112 |
+
return mcolors.to_hex(rgb)
|
113 |
+
|
114 |
+
def generate_color_gradient(start_color, end_color, num_colors):
|
115 |
+
"""Generate a list of hex color codes transitioning from start_color to end_color."""
|
116 |
+
if num_colors == 1:
|
117 |
+
return [start_color]
|
118 |
+
# Define the color scale from start to end color using hex codes
|
119 |
+
colorscale = [[0, start_color], [1, end_color]]
|
120 |
|
121 |
+
# Generate the colors
|
122 |
+
colors = pc.sample_colorscale(
|
123 |
+
colorscale,
|
124 |
+
[i / (num_colors - 1) for i in range(num_colors)],
|
125 |
+
colortype='hex' # Use 'rgb' to get colors in RGB format
|
126 |
+
)
|
127 |
+
|
128 |
+
# print(colors)
|
129 |
+
# Convert RGB tuples to hex
|
130 |
+
# hex_colors = [rgb_to_hex(color) for color in colors]
|
131 |
+
return colors
|
132 |
+
|
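A quick usage sketch of generate_color_gradient as defined above (illustrative only; the exact hex values depend on Plotly's interpolation):

    # one colour per scenario column, fading from blue to red
    palette = generate_color_gradient("#0000FF", "#FF0000", 4)
    print(palette)  # e.g. ['#0000ff', '#5500aa', '#aa0055', '#ff0000'] (approximate)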
133 |
+
# def generate_color_gradient(start_color, end_color, num_colors):
|
134 |
+
# import plotly.express as px
|
135 |
+
# """Generate a list of hex color codes transitioning from start_color to end_color."""
|
136 |
+
# colors = px.colors.sequential.Plasma # Using a built-in color scale
|
137 |
+
# custom_colors = px.colors.sample_colorscale(
|
138 |
+
# colorscale=[[0, start_color], [1, end_color]],
|
139 |
+
# n_colors=num_colors
|
140 |
+
# )
|
141 |
+
# return custom_colors
|
142 |
+
|
143 |
+
def plot_comparison_chart(df,metric,custom_colors):
|
144 |
+
# print(metric)
|
145 |
+
# print(custom_colors)
|
146 |
+
custom_colors = [
|
147 |
+
"#4169E1", # Royal Blue
|
148 |
+
"#ADD8E6", # Light Blue
|
149 |
+
"#FF7F50" , # Coral
|
150 |
+
"#87CEEB", # Sky Blue
|
151 |
+
"#FA8072", # Salmon
|
152 |
+
"#1E90FF", # Dodger Blue
|
153 |
+
|
154 |
+
"#00008B" ,
|
155 |
+
"#F08080", # Light Coral
|
156 |
+
|
157 |
+
"#FF8C00", # Dark Orange
|
158 |
+
"#FFA500", # Orange
|
159 |
+
|
160 |
+
]
|
161 |
# Create traces for each column
|
162 |
traces = []
|
163 |
+
for i,column in enumerate(df.columns):
|
164 |
+
# print(i)
|
165 |
+
# print(custom_colors[i])
|
166 |
traces.append(go.Bar(
|
167 |
x=df.index,
|
168 |
y=df[column],
|
|
|
170 |
text=df[column].apply(numerize), # Adding text for each point
|
171 |
textposition='auto',
|
172 |
hoverinfo='x+y+text',
|
173 |
+
marker_color = custom_colors[i]
|
174 |
))
|
175 |
|
176 |
# Create the layout
|
|
|
184 |
# Create the figure
|
185 |
fig = go.Figure(data=traces, layout=layout)
|
186 |
|
187 |
+
fig.update_layout(
|
188 |
+
|
189 |
+
legend=dict(
|
190 |
+
orientation="h", # Horizontal orientation
|
191 |
+
yanchor="top", # Anchor the legend at the top
|
192 |
+
y=-0.45, # Position the legend below the plot area
|
193 |
+
xanchor="center", # Center the legend horizontally
|
194 |
+
x=0.5 # Center the legend on the x-axis
|
195 |
+
)
|
196 |
+
)
|
197 |
+
|
198 |
return fig
|
199 |
|
200 |
+
def save_ppt_file(fig1,fig2,fig3):
|
201 |
+
|
202 |
+
# Initialize PowerPoint presentation
|
203 |
+
prs = Presentation()
|
204 |
+
# Helper function to add Plotly figure to slide
|
205 |
+
def add_plotly_chart_to_slide(slide, fig, left, top, width, height):
|
206 |
+
img_stream = BytesIO()
|
207 |
+
pio.write_image(fig, img_stream, format='png',engine="orca")
|
208 |
+
slide.shapes.add_picture(img_stream, left, top, width, height)
|
209 |
+
|
210 |
+
slide_1 = prs.slides.add_slide(prs.slide_layouts[6])
|
211 |
+
# title_1 = slide_1.shapes.title
|
212 |
+
# title_1.text = "Comparing Spends"
|
213 |
+
|
214 |
+
add_plotly_chart_to_slide(slide_1, fig1, Inches(0), Inches(0.25), width=Inches(10), height=Inches(6))
|
215 |
+
|
216 |
+
slide_2 = prs.slides.add_slide(prs.slide_layouts[6])
|
217 |
+
# title_2 = slide_2.shapes.title
|
218 |
+
# title_2.text = "Comparing Contributions"
|
219 |
+
add_plotly_chart_to_slide(slide_2, fig2, Inches(0), Inches(0.25), width=Inches(10), height=Inches(6))
|
220 |
+
|
221 |
+
|
222 |
+
slide_3 = prs.slides.add_slide(prs.slide_layouts[6])
|
223 |
+
# title_3 = slide_3.shapes.title
|
224 |
+
# title_3.text = "Comparing Efficiency"
|
225 |
+
add_plotly_chart_to_slide(slide_3, fig3, Inches(0), Inches(0.25), width=Inches(10), height=Inches(6))
|
226 |
+
|
227 |
+
ppt_stream = BytesIO()
|
228 |
+
prs.save(ppt_stream)
|
229 |
+
ppt_stream.seek(0)
|
230 |
+
|
231 |
+
return ppt_stream.getvalue()
|
232 |
+
|
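pio.write_image(..., engine="orca") depends on the separate Orca executable being installed; a hedged fallback sketch (assuming the kaleido package, Plotly's default image engine in recent versions, is available) would be:

    def fig_to_png_stream(fig):
        img_stream = BytesIO()
        try:
            pio.write_image(fig, img_stream, format='png', engine='orca')
        except Exception:
            pio.write_image(fig, img_stream, format='png', engine='kaleido')
        img_stream.seek(0)
        return img_stream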
233 |
def create_comparison_plots():
|
234 |
# comparison_scenarios_df()
|
235 |
spends_df, prospects_df, efficiency_df = comparison_scenarios_df()
|
236 |
# st.dataframe(spends_df)
|
237 |
+
blue = "#0000FF" # Blue
|
238 |
+
green = "#00FF00" # Green
|
239 |
+
red = "#FF0000" # Red
|
240 |
+
|
241 |
+
custom_colors = generate_color_gradient(blue, red, spends_df.shape[1])
|
242 |
+
st.plotly_chart(plot_comparison_chart(spends_df,"Spends",custom_colors),use_container_width=True)
|
243 |
+
st.plotly_chart(plot_comparison_chart(prospects_df,"Contributions",custom_colors),use_container_width=True)
|
244 |
+
st.plotly_chart(plot_comparison_chart(efficiency_df,"Efficiency",custom_colors),use_container_width=True)
|
245 |
|
246 |
+
fig1 = plot_comparison_chart(spends_df,"Spends",custom_colors)
|
247 |
+
fig2 = plot_comparison_chart(prospects_df,"Contributions",custom_colors)
|
248 |
+
fig3 = plot_comparison_chart(efficiency_df,"Efficiency",custom_colors)
|
249 |
+
|
250 |
+
ppt_file = save_ppt_file(fig1,fig2,fig3)
|
251 |
+
# Add a download button
|
252 |
+
st.download_button(
|
253 |
+
label="Download Comparison Analysis",
|
254 |
+
data=ppt_file,
|
255 |
+
file_name="MMM_Scenario_Comparison.pptx",
|
256 |
+
mime="application/vnd.openxmlformats-officedocument.presentationml.presentation"
|
257 |
+
)
|
258 |
+
|
259 |
+
|
260 |
def create_scenario_summary(scenario_dict):
|
261 |
summary_rows = []
|
262 |
actual_total_spends = scenario_dict.get('actual_total_spends'),
|
|
|
418 |
wb.remove(wb.active)
|
419 |
st.session_state['xlsx_buffer'] = io.BytesIO()
|
420 |
summary_df = None
|
421 |
+
## # print(scenarios_to_download)
|
422 |
for scenario_name in scenarios_to_download:
|
423 |
scenario_dict = st.session_state['saved_scenarios'][scenario_name]
|
424 |
_spends = []
|
|
|
508 |
if auth_status == True:
|
509 |
is_state_initiaized = st.session_state.get('initialized',False)
|
510 |
if not is_state_initiaized:
|
511 |
+
## # print("Scenario page state reloaded")
|
512 |
initialize_data(target_file = "Overview_data_test_panel@#prospects.xlsx")
|
513 |
|
514 |
|
response_curves_model_quality.py
CHANGED
@@ -112,7 +112,7 @@ def data_output(channel,X,y,y_fit_inv,x_ext_data,y_fit_inv_ext):
|
|
112 |
for i in range(len(y_fit_inv_ext)):
|
113 |
y_fit_inv_v2_ext.append(y_fit_inv_ext[i][0])
|
114 |
|
115 |
-
# # print(x_ext_data)
|
116 |
ext_df = pd.DataFrame()
|
117 |
ext_df[f'{channel}_Spends'] = x_ext_data
|
118 |
ext_df[fit_col] = y_fit_inv_v2_ext
|
@@ -125,7 +125,7 @@ def data_output(channel,X,y,y_fit_inv,x_ext_data,y_fit_inv_ext):
|
|
125 |
|
126 |
ext_df['MAT'] = ["ext","ext","ext"]
|
127 |
|
128 |
-
# # print(ext_df)
|
129 |
plot_df= plot_df.append(ext_df)
|
130 |
return plot_df
|
131 |
|
@@ -148,7 +148,7 @@ def input_data(df,spend_col,prospect_col):
|
|
148 |
return X,y,x_data,y_data,x_minmax,y_minmax
|
149 |
|
150 |
def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
151 |
-
# # print(x_max)
|
152 |
x_ext_data = [x_max*1.2,x_max*1.3,x_max*1.5]
|
153 |
# x_ext_data = [1500000,2000000,2500000]
|
154 |
# x_ext_data = [x_max+100,x_max+200,x_max+5000]
|
@@ -157,7 +157,7 @@ def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
|
157 |
for i in range(len(x_scaled)):
|
158 |
x_data.append(x_scaled[i][0])
|
159 |
|
160 |
-
# # print(x_data)
|
161 |
y_fit = hill_equation(x_data, Kd_fit, n_fit)
|
162 |
y_fit_inv = y_minmax.inverse_transform(np.array(y_fit).reshape(-1,1))
|
163 |
|
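For reference, the hill_equation fitted here is the standard saturating S-curve; the same form appears inline in upload_file_prospects_calc on the Scenario Planner page. Its definition sits outside this hunk, but it has the shape:

    import numpy as np

    def hill_equation(x, Kd, n):
        x = np.asarray(x, dtype=float)
        return x**n / (Kd**n + x**n)  # 0 at x=0, 0.5 at x=Kd, saturating toward 1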
@@ -170,8 +170,8 @@ def fit_data(spend_col,prospect_col,channel):
|
|
170 |
|
171 |
X,y,x_data,y_data,x_minmax,y_minmax = input_data(temp_df,spend_col,prospect_col)
|
172 |
y_fit, y_fit_inv, Kd_fit, n_fit = hill_func(x_data,y_data,x_minmax,y_minmax)
|
173 |
-
# # print('k: ',Kd_fit)
|
174 |
-
# # print('n: ', n_fit)
|
175 |
|
176 |
##### extend_s_curve
|
177 |
x_ext_data,y_fit_inv_ext= extend_s_curve(temp_df[spend_col].max(),x_minmax,y_minmax, Kd_fit, n_fit)
|
@@ -183,7 +183,7 @@ plotly_data = fit_data(spend_cols[0],prospect_cols[0],channel_cols[0])
|
|
183 |
plotly_data.tail()
|
184 |
|
185 |
for i in range(1,13):
|
186 |
-
# # print(i)
|
187 |
pdf = fit_data(spend_cols[i],prospect_cols[i],channel_cols[i])
|
188 |
plotly_data = plotly_data.merge(pdf,on = ["Date","MAT"],how = "left")
|
189 |
|
@@ -211,7 +211,7 @@ def response_curves(channel,x_modified,y_modified):
|
|
211 |
|
212 |
plotly_data2 = plotly_data.copy()
|
213 |
plotly_data2 = plotly_data[plotly_data[x_col].isnull()==False]
|
214 |
-
print(plotly_data[plotly_data2['Date'] == plotly_data2['Date'].max()][x_col])
|
215 |
# .dropna(subset=[x_col]).reset_index(inplace = True)
|
216 |
fig.add_trace(go.Scatter(
|
217 |
x=plotly_data[plotly_data2['Date'] == plotly_data2['Date'].max()][x_col],
|
@@ -358,7 +358,7 @@ def data_output(channel,X,y,y_fit_inv,x_ext_data,y_fit_inv_ext):
|
|
358 |
for i in range(len(y_fit_inv_ext)):
|
359 |
y_fit_inv_v2_ext.append(y_fit_inv_ext[i][0])
|
360 |
|
361 |
-
# # print(x_ext_data)
|
362 |
ext_df = pd.DataFrame()
|
363 |
ext_df[f'{channel}_Spends'] = x_ext_data
|
364 |
ext_df[fit_col] = y_fit_inv_v2_ext
|
@@ -371,7 +371,7 @@ def data_output(channel,X,y,y_fit_inv,x_ext_data,y_fit_inv_ext):
|
|
371 |
|
372 |
ext_df['MAT'] = ["ext","ext","ext"]
|
373 |
|
374 |
-
# # print(ext_df)
|
375 |
plot_df= plot_df.append(ext_df)
|
376 |
return plot_df
|
377 |
|
@@ -394,7 +394,7 @@ def input_data(df,spend_col,prospect_col):
|
|
394 |
return X,y,x_data,y_data,x_minmax,y_minmax
|
395 |
|
396 |
def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
397 |
-
# # print(x_max)
|
398 |
x_ext_data = [x_max*1.2,x_max*1.3,x_max*1.5]
|
399 |
# x_ext_data = [1500000,2000000,2500000]
|
400 |
# x_ext_data = [x_max+100,x_max+200,x_max+5000]
|
@@ -403,7 +403,7 @@ def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
|
403 |
for i in range(len(x_scaled)):
|
404 |
x_data.append(x_scaled[i][0])
|
405 |
|
406 |
-
# # print(x_data)
|
407 |
y_fit = hill_equation(x_data, Kd_fit, n_fit)
|
408 |
y_fit_inv = y_minmax.inverse_transform(np.array(y_fit).reshape(-1,1))
|
409 |
|
@@ -416,8 +416,8 @@ def fit_data(spend_col,prospect_col,channel):
|
|
416 |
|
417 |
X,y,x_data,y_data,x_minmax,y_minmax = input_data(temp_df,spend_col,prospect_col)
|
418 |
y_fit, y_fit_inv, Kd_fit, n_fit = hill_func(x_data,y_data,x_minmax,y_minmax)
|
419 |
-
# # print('k: ',Kd_fit)
|
420 |
-
# # print('n: ', n_fit)
|
421 |
|
422 |
##### extend_s_curve
|
423 |
x_ext_data,y_fit_inv_ext= extend_s_curve(temp_df[spend_col].max(),x_minmax,y_minmax, Kd_fit, n_fit)
|
@@ -429,7 +429,7 @@ plotly_data = fit_data(spend_cols[0],prospect_cols[0],channel_cols[0])
|
|
429 |
plotly_data.tail()
|
430 |
|
431 |
for i in range(1,13):
|
432 |
-
# # print(i)
|
433 |
pdf = fit_data(spend_cols[i],prospect_cols[i],channel_cols[i])
|
434 |
plotly_data = plotly_data.merge(pdf,on = ["Date","MAT"],how = "left")
|
435 |
|
@@ -440,7 +440,7 @@ def response_curves(channel,x_modified,y_modified):
|
|
440 |
|
441 |
x_col = (channel+"_Spends").replace('\xa0', '')
|
442 |
y_col = ("Fit_Data_"+channel).replace('\xa0', '')
|
443 |
-
|
444 |
# fig.add_trace(go.Scatter(
|
445 |
# x=plotly_data[x_col],
|
446 |
# y=plotly_data[y_col],
|
@@ -455,7 +455,7 @@ def response_curves(channel,x_modified,y_modified):
|
|
455 |
marker=dict(color = 'blue'),
|
456 |
name=x_col.replace('_Spends', '')
|
457 |
))
|
458 |
-
|
459 |
plotly_data2 = plotly_data.copy()
|
460 |
plotly_data2 = plotly_data[plotly_data[x_col].isnull()==False]
|
461 |
plotly_data2 = plotly_data2[plotly_data2["MAT"]!="ext"]
|
@@ -471,9 +471,10 @@ def response_curves(channel,x_modified,y_modified):
|
|
471 |
name="Current Spends"
|
472 |
))
|
473 |
|
|
|
474 |
fig.add_trace(go.Scatter(
|
475 |
-
x=[x_modified],
|
476 |
-
y=[y_modified],
|
477 |
mode='markers',
|
478 |
marker=dict(
|
479 |
size=13 # Adjust the size value to make the markers larger or smaller
|
|
|
112 |
for i in range(len(y_fit_inv_ext)):
|
113 |
y_fit_inv_v2_ext.append(y_fit_inv_ext[i][0])
|
114 |
|
115 |
+
# # # print(x_ext_data)
|
116 |
ext_df = pd.DataFrame()
|
117 |
ext_df[f'{channel}_Spends'] = x_ext_data
|
118 |
ext_df[fit_col] = y_fit_inv_v2_ext
|
|
|
125 |
|
126 |
ext_df['MAT'] = ["ext","ext","ext"]
|
127 |
|
128 |
+
# # # print(ext_df)
|
129 |
plot_df= plot_df.append(ext_df)
|
130 |
return plot_df
|
131 |
|
|
|
148 |
return X,y,x_data,y_data,x_minmax,y_minmax
|
149 |
|
150 |
def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
151 |
+
# # # print(x_max)
|
152 |
x_ext_data = [x_max*1.2,x_max*1.3,x_max*1.5]
|
153 |
# x_ext_data = [1500000,2000000,2500000]
|
154 |
# x_ext_data = [x_max+100,x_max+200,x_max+5000]
|
|
|
157 |
for i in range(len(x_scaled)):
|
158 |
x_data.append(x_scaled[i][0])
|
159 |
|
160 |
+
# # # print(x_data)
|
161 |
y_fit = hill_equation(x_data, Kd_fit, n_fit)
|
162 |
y_fit_inv = y_minmax.inverse_transform(np.array(y_fit).reshape(-1,1))
|
163 |
|
|
|
170 |
|
171 |
X,y,x_data,y_data,x_minmax,y_minmax = input_data(temp_df,spend_col,prospect_col)
|
172 |
y_fit, y_fit_inv, Kd_fit, n_fit = hill_func(x_data,y_data,x_minmax,y_minmax)
|
173 |
+
# # # print('k: ',Kd_fit)
|
174 |
+
# # # print('n: ', n_fit)
|
175 |
|
176 |
##### extend_s_curve
|
177 |
x_ext_data,y_fit_inv_ext= extend_s_curve(temp_df[spend_col].max(),x_minmax,y_minmax, Kd_fit, n_fit)
|
|
|
183 |
plotly_data.tail()
|
184 |
|
185 |
for i in range(1,13):
|
186 |
+
# # # print(i)
|
187 |
pdf = fit_data(spend_cols[i],prospect_cols[i],channel_cols[i])
|
188 |
plotly_data = plotly_data.merge(pdf,on = ["Date","MAT"],how = "left")
|
189 |
|
|
|
211 |
|
212 |
plotly_data2 = plotly_data.copy()
|
213 |
plotly_data2 = plotly_data[plotly_data[x_col].isnull()==False]
|
214 |
+
# print(plotly_data[plotly_data2['Date'] == plotly_data2['Date'].max()][x_col])
|
215 |
# .dropna(subset=[x_col]).reset_index(inplace = True)
|
216 |
fig.add_trace(go.Scatter(
|
217 |
x=plotly_data[plotly_data2['Date'] == plotly_data2['Date'].max()][x_col],
|
|
|
358 |
for i in range(len(y_fit_inv_ext)):
|
359 |
y_fit_inv_v2_ext.append(y_fit_inv_ext[i][0])
|
360 |
|
361 |
+
# # # print(x_ext_data)
|
362 |
ext_df = pd.DataFrame()
|
363 |
ext_df[f'{channel}_Spends'] = x_ext_data
|
364 |
ext_df[fit_col] = y_fit_inv_v2_ext
|
|
|
371 |
|
372 |
ext_df['MAT'] = ["ext","ext","ext"]
|
373 |
|
374 |
+
# # # print(ext_df)
|
375 |
plot_df= plot_df.append(ext_df)
|
376 |
return plot_df
|
377 |
|
|
|
394 |
return X,y,x_data,y_data,x_minmax,y_minmax
|
395 |
|
396 |
def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
397 |
+
# # # print(x_max)
|
398 |
x_ext_data = [x_max*1.2,x_max*1.3,x_max*1.5]
|
399 |
# x_ext_data = [1500000,2000000,2500000]
|
400 |
# x_ext_data = [x_max+100,x_max+200,x_max+5000]
|
|
|
403 |
for i in range(len(x_scaled)):
|
404 |
x_data.append(x_scaled[i][0])
|
405 |
|
406 |
+
# # # print(x_data)
|
407 |
y_fit = hill_equation(x_data, Kd_fit, n_fit)
|
408 |
y_fit_inv = y_minmax.inverse_transform(np.array(y_fit).reshape(-1,1))
|
409 |
|
|
|
416 |
|
417 |
X,y,x_data,y_data,x_minmax,y_minmax = input_data(temp_df,spend_col,prospect_col)
|
418 |
y_fit, y_fit_inv, Kd_fit, n_fit = hill_func(x_data,y_data,x_minmax,y_minmax)
|
419 |
+
# # # print('k: ',Kd_fit)
|
420 |
+
# # # print('n: ', n_fit)
|
421 |
|
422 |
##### extend_s_curve
|
423 |
x_ext_data,y_fit_inv_ext= extend_s_curve(temp_df[spend_col].max(),x_minmax,y_minmax, Kd_fit, n_fit)
|
|
|
429 |
plotly_data.tail()
|
430 |
|
431 |
for i in range(1,13):
|
432 |
+
# # # print(i)
|
433 |
pdf = fit_data(spend_cols[i],prospect_cols[i],channel_cols[i])
|
434 |
plotly_data = plotly_data.merge(pdf,on = ["Date","MAT"],how = "left")
|
435 |
|
|
|
440 |
|
441 |
x_col = (channel+"_Spends").replace('\xa0', '')
|
442 |
y_col = ("Fit_Data_"+channel).replace('\xa0', '')
|
443 |
+
|
444 |
# fig.add_trace(go.Scatter(
|
445 |
# x=plotly_data[x_col],
|
446 |
# y=plotly_data[y_col],
|
|
|
455 |
marker=dict(color = 'blue'),
|
456 |
name=x_col.replace('_Spends', '')
|
457 |
))
|
458 |
+
dividing_parameter = len(plotly_data1[plotly_data1[x_col].isnull()==False])
|
459 |
plotly_data2 = plotly_data.copy()
|
460 |
plotly_data2 = plotly_data[plotly_data[x_col].isnull()==False]
|
461 |
plotly_data2 = plotly_data2[plotly_data2["MAT"]!="ext"]
|
|
|
471 |
name="Current Spends"
|
472 |
))
|
473 |
|
474 |
+
# print(dividing_parameter)
|
475 |
fig.add_trace(go.Scatter(
|
476 |
+
x=[x_modified/dividing_parameter],
|
477 |
+
y=[y_modified/dividing_parameter],
|
478 |
mode='markers',
|
479 |
marker=dict(
|
480 |
size=13 # Adjust the size value to make the markers larger or smaller
|
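A reading of the change above (an assumption, the commit does not state it): the planner passes window totals in x_modified/y_modified while the fitted curve is in per-period units, so the marker is rescaled by the number of observed periods. The count is taken from plotly_data1, which is not defined in this hunk; a self-contained version using the filtered frame would be:

    observed_weeks = plotly_data2[x_col].notnull().sum()  # periods with recorded spend
    fig.add_trace(go.Scatter(
        x=[x_modified / observed_weeks],
        y=[y_modified / observed_weeks],
        mode='markers',
        marker=dict(size=13),
    ))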
response_curves_model_quality_base.py
CHANGED
@@ -111,7 +111,7 @@ def data_output(channel,X,y,y_fit_inv,x_ext_data,y_fit_inv_ext):
|
|
111 |
for i in range(len(y_fit_inv_ext)):
|
112 |
y_fit_inv_v2_ext.append(y_fit_inv_ext[i][0])
|
113 |
|
114 |
-
# # print(x_ext_data)
|
115 |
ext_df = pd.DataFrame()
|
116 |
ext_df[f'{channel}_Spends'] = x_ext_data
|
117 |
ext_df[f'{channel}_Prospects'] = y_fit_inv_v2_ext
|
@@ -125,7 +125,7 @@ def data_output(channel,X,y,y_fit_inv,x_ext_data,y_fit_inv_ext):
|
|
125 |
|
126 |
ext_df['MAT'] = ["ext","ext","ext"]
|
127 |
|
128 |
-
# # print(ext_df.columns)
|
129 |
plot_df= plot_df.append(ext_df)
|
130 |
return plot_df
|
131 |
|
@@ -148,7 +148,7 @@ def input_data(df,spend_col,prospect_col):
|
|
148 |
return X,y,x_data,y_data,x_minmax,y_minmax
|
149 |
|
150 |
def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
151 |
-
# # print(x_max)
|
152 |
x_ext_data = [x_max*1.2,x_max*1.3,x_max*1.5]
|
153 |
# x_ext_data = [1500000,2000000,2500000]
|
154 |
# x_ext_data = [x_max+100,x_max+200,x_max+5000]
|
@@ -157,7 +157,7 @@ def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
|
157 |
for i in range(len(x_scaled)):
|
158 |
x_data.append(x_scaled[i][0])
|
159 |
|
160 |
-
# # print(x_data)
|
161 |
y_fit = hill_equation(x_data, Kd_fit, n_fit)
|
162 |
y_fit_inv = y_minmax.inverse_transform(np.array(y_fit).reshape(-1,1))
|
163 |
|
@@ -170,8 +170,8 @@ def fit_data(spend_col,prospect_col,channel):
|
|
170 |
|
171 |
X,y,x_data,y_data,x_minmax,y_minmax = input_data(temp_df,spend_col,prospect_col)
|
172 |
y_fit, y_fit_inv, Kd_fit, n_fit = hill_func(x_data,y_data,x_minmax,y_minmax)
|
173 |
-
# # print('k: ',Kd_fit)
|
174 |
-
# # print('n: ', n_fit)
|
175 |
|
176 |
##### extend_s_curve
|
177 |
x_ext_data,y_fit_inv_ext= extend_s_curve(temp_df[spend_col].max(),x_minmax,y_minmax, Kd_fit, n_fit)
|
@@ -183,7 +183,7 @@ plotly_data = fit_data(spend_cols[0],prospect_cols[0],channel_cols[0])
|
|
183 |
plotly_data.tail()
|
184 |
|
185 |
for i in range(1,13):
|
186 |
-
# print(i)
|
187 |
pdf = fit_data(spend_cols[i],prospect_cols[i],channel_cols[i])
|
188 |
plotly_data = plotly_data.merge(pdf,on = ["Date","MAT"],how = "left")
|
189 |
|
@@ -213,6 +213,7 @@ def response_curves(channel,chart_typ):
|
|
213 |
y=plotly_data.sort_values(by=x_col, ascending=True)[y_col],
|
214 |
mode=mode_f1,
|
215 |
name=x_col.replace('_Spends', '')
|
|
|
216 |
))
|
217 |
else:
|
218 |
mode_f1 = "markers"
|
@@ -240,12 +241,13 @@ def response_curves(channel,chart_typ):
|
|
240 |
y=plotly_data.sort_values(by=x_col, ascending=True)[y_col],
|
241 |
mode=mode_f1,
|
242 |
name=x_col.replace('_Spends', '')
|
|
|
243 |
))
|
244 |
|
245 |
|
246 |
plotly_data2 = plotly_data[plotly_data[x_col].isnull()==False]
|
247 |
plotly_data2 = plotly_data2[plotly_data2["MAT"]!="ext"]
|
248 |
-
# print(plotly_data2[x_col].mean(),plotly_data2[y_col].mean())
|
249 |
# import steamlit as st
|
250 |
# st.dataframe()
|
251 |
fig.add_trace(go.Scatter(
|
|
|
111 |
for i in range(len(y_fit_inv_ext)):
|
112 |
y_fit_inv_v2_ext.append(y_fit_inv_ext[i][0])
|
113 |
|
114 |
+
# # # print(x_ext_data)
|
115 |
ext_df = pd.DataFrame()
|
116 |
ext_df[f'{channel}_Spends'] = x_ext_data
|
117 |
ext_df[f'{channel}_Prospects'] = y_fit_inv_v2_ext
|
|
|
125 |
|
126 |
ext_df['MAT'] = ["ext","ext","ext"]
|
127 |
|
128 |
+
# # # print(ext_df.columns)
|
129 |
plot_df= plot_df.append(ext_df)
|
130 |
return plot_df
|
131 |
|
|
|
148 |
return X,y,x_data,y_data,x_minmax,y_minmax
|
149 |
|
150 |
def extend_s_curve(x_max,x_minmax,y_minmax, Kd_fit, n_fit):
|
151 |
+
# # # print(x_max)
|
152 |
x_ext_data = [x_max*1.2,x_max*1.3,x_max*1.5]
|
153 |
# x_ext_data = [1500000,2000000,2500000]
|
154 |
# x_ext_data = [x_max+100,x_max+200,x_max+5000]
|
|
|
157 |
for i in range(len(x_scaled)):
|
158 |
x_data.append(x_scaled[i][0])
|
159 |
|
160 |
+
# # # print(x_data)
|
161 |
y_fit = hill_equation(x_data, Kd_fit, n_fit)
|
162 |
y_fit_inv = y_minmax.inverse_transform(np.array(y_fit).reshape(-1,1))
|
163 |
|
|
|
170 |
|
171 |
X,y,x_data,y_data,x_minmax,y_minmax = input_data(temp_df,spend_col,prospect_col)
|
172 |
y_fit, y_fit_inv, Kd_fit, n_fit = hill_func(x_data,y_data,x_minmax,y_minmax)
|
173 |
+
# # # print('k: ',Kd_fit)
|
174 |
+
# # # print('n: ', n_fit)
|
175 |
|
176 |
##### extend_s_curve
|
177 |
x_ext_data,y_fit_inv_ext= extend_s_curve(temp_df[spend_col].max(),x_minmax,y_minmax, Kd_fit, n_fit)
|
|
|
183 |
plotly_data.tail()
|
184 |
|
185 |
for i in range(1,13):
|
186 |
+
# # print(i)
|
187 |
pdf = fit_data(spend_cols[i],prospect_cols[i],channel_cols[i])
|
188 |
plotly_data = plotly_data.merge(pdf,on = ["Date","MAT"],how = "left")
|
189 |
|
|
|
213 |
y=plotly_data.sort_values(by=x_col, ascending=True)[y_col],
|
214 |
mode=mode_f1,
|
215 |
name=x_col.replace('_Spends', '')
|
216 |
+
,line=dict(color='#4B88FF')
|
217 |
))
|
218 |
else:
|
219 |
mode_f1 = "markers"
|
|
|
241 |
y=plotly_data.sort_values(by=x_col, ascending=True)[y_col],
|
242 |
mode=mode_f1,
|
243 |
name=x_col.replace('_Spends', '')
|
244 |
+
,line=dict(color='#4B88FF')
|
245 |
))
|
246 |
|
247 |
|
248 |
plotly_data2 = plotly_data[plotly_data[x_col].isnull()==False]
|
249 |
plotly_data2 = plotly_data2[plotly_data2["MAT"]!="ext"]
|
250 |
+
# # print(plotly_data2[x_col].mean(),plotly_data2[y_col].mean())
|
251 |
# import steamlit as st
|
252 |
# st.dataframe()
|
253 |
fig.add_trace(go.Scatter(
|
summary_df.pkl
CHANGED
@@ -1,3 +1,3 @@
1 | version https://git-lfs.github.com/spec/v1
2 | - oid sha256:
3 | size 1822

1 | version https://git-lfs.github.com/spec/v1
2 | + oid sha256:01b3193c6c809623ba5c8c76bb74b4583886310b8e7e7d4e35e17bdd63fa37d7
3 | size 1822
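summary_df.pkl is tracked with Git LFS, so the commit only rewrites its three-line pointer file; the old oid is truncated in this view, and the new oid points at the regenerated pickle. For reference, an LFS pointer always has this shape (the values below are placeholders):

version https://git-lfs.github.com/spec/v1
oid sha256:<64-character hex digest of the stored file>
size <file size in bytes>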
utilities.py
CHANGED
@@ -211,7 +211,7 @@ def initialize_data(
211 | # uopx_conv_rates = {'streaming_impressions' : 0.007,'digital_impressions' : 0.007,'search_clicks' : 0.00719,'tv_impressions' : 0.000173,
212 | # "digital_clicks":0.005,"streaming_clicks":0.004,'streaming_spends':1,"tv_spends":1,"search_spends":1,
213 | # "digital_spends":1}
214 | - # # print('State initialized')
215 |
216 | excel = pd.read_excel(target_file, sheet_name=None)
217 |

@@ -305,16 +305,16 @@ def initialize_data(
305 | if updated_rcs is not None and updated_rcs_key in list(updated_rcs.keys()):
306 | response_curves[inp_col] = updated_rcs[updated_rcs_key]
307 |
308 | - # # print(response_curves)
309 | ## conversion rates
310 | spend_col = [
311 | _col
312 | for _col in spend_df.columns
313 | if _col.startswith(inp_col.rsplit("_", 1)[0])
314 | ][0]
315 | - # # print(spend_col)
316 | - # # print('## printing spendssss')
317 | - # # print(spend_col)
318 | conv = (
319 | spend_df.set_index("Week")[spend_col]
320 | / input_df.set_index("Date")[inp_col].clip(lower=1)

@@ -324,10 +324,10 @@ def initialize_data(
324 | conv_rates[inp_col] = list(conv.drop("Week", axis=1).mean().to_dict().values())[
325 | 0
326 | ]
327 | - # # print(conv_rates)
328 | - ### print('Before',conv_rates[inp_col])
329 | # conv_rates[inp_col] = uopx_conv_rates[inp_col]
330 | - ### print('After',(conv_rates[inp_col]))
331 |
332 | channel = Channel(
333 | name=inp_col,

@@ -347,21 +347,21 @@ def initialize_data(
347 | "num_pos_obsv":param_dicts["num_pos_obsv"][inp_col]
348 | },
349 | bounds=np.array([-10, 10]),
350 | - channel_bounds_min =
351 | - channel_bounds_max =
352 | )
353 | channels[inp_col] = channel
354 | if sales is None:
355 | sales = channel.actual_sales
356 | else:
357 | sales += channel.actual_sales
358 | - # # print(actual_output_dic)
359 | other_contributions = (
360 | output_df.drop([*output_cols], axis=1).sum(axis=1, numeric_only=True).values
361 | )
362 | correction = output_df.drop("Date", axis=1).sum(axis=1).values - (sales + other_contributions)
363 | - # # print(other_contributions)
364 | - # # print(correction)
365 | scenario = Scenario(
366 | name="default",
367 | channels=channels,

211 | # uopx_conv_rates = {'streaming_impressions' : 0.007,'digital_impressions' : 0.007,'search_clicks' : 0.00719,'tv_impressions' : 0.000173,
212 | # "digital_clicks":0.005,"streaming_clicks":0.004,'streaming_spends':1,"tv_spends":1,"search_spends":1,
213 | # "digital_spends":1}
214 | + # # # print('State initialized')
215 |
216 | excel = pd.read_excel(target_file, sheet_name=None)
217 |

305 | if updated_rcs is not None and updated_rcs_key in list(updated_rcs.keys()):
306 | response_curves[inp_col] = updated_rcs[updated_rcs_key]
307 |
308 | + # # # print(response_curves)
309 | ## conversion rates
310 | spend_col = [
311 | _col
312 | for _col in spend_df.columns
313 | if _col.startswith(inp_col.rsplit("_", 1)[0])
314 | ][0]
315 | + # # # print(spend_col)
316 | + # # # print('## # printing spendssss')
317 | + # # # print(spend_col)
318 | conv = (
319 | spend_df.set_index("Week")[spend_col]
320 | / input_df.set_index("Date")[inp_col].clip(lower=1)

324 | conv_rates[inp_col] = list(conv.drop("Week", axis=1).mean().to_dict().values())[
325 | 0
326 | ]
327 | + # # # print(conv_rates)
328 | + ### # print('Before',conv_rates[inp_col])
329 | # conv_rates[inp_col] = uopx_conv_rates[inp_col]
330 | + ### # print('After',(conv_rates[inp_col]))
331 |
332 | channel = Channel(
333 | name=inp_col,

347 | "num_pos_obsv":param_dicts["num_pos_obsv"][inp_col]
348 | },
349 | bounds=np.array([-10, 10]),
350 | + channel_bounds_min = round(param_dicts["x_min"][inp_col]*100*param_dicts["num_pos_obsv"][inp_col]/param_dicts["current_spends"][inp_col]),
351 | + channel_bounds_max = 100
352 | )
353 | channels[inp_col] = channel
354 | if sales is None:
355 | sales = channel.actual_sales
356 | else:
357 | sales += channel.actual_sales
358 | + # # # print(actual_output_dic)
359 | other_contributions = (
360 | output_df.drop([*output_cols], axis=1).sum(axis=1, numeric_only=True).values
361 | )
362 | correction = output_df.drop("Date", axis=1).sum(axis=1).values - (sales + other_contributions)
363 | + # # # print(other_contributions)
364 | + # # # print(correction)
365 | scenario = Scenario(
366 | name="default",
367 | channels=channels,
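Two computations in these utilities.py hunks carry the substance of the change: the per-channel conversion rate (weekly spend divided by the input metric, clipped at 1 to avoid division by zero, then averaged) and the newly filled-in channel_bounds_min, which rescales the response curve's minimum per-observation spend (x_min) into a percentage of the channel's current spend. A rough sketch of both, using only the param_dicts keys visible in the diff (everything else is illustrative):

import pandas as pd

def conversion_rate(weekly_spend: pd.Series, weekly_input: pd.Series) -> float:
    # Spend per unit of the channel's input metric (impressions, clicks, ...),
    # averaged over weeks; the input is clipped at 1 exactly as in the conv expression.
    return float((weekly_spend.values / weekly_input.clip(lower=1).values).mean())

def channel_bounds_min(param_dicts: dict, inp_col: str) -> int:
    # x_min is a per-positive-observation spend, so scale it back up by the number of
    # positive observations and express it as a percentage of current total spend.
    return round(param_dicts["x_min"][inp_col] * 100
                 * param_dicts["num_pos_obsv"][inp_col]
                 / param_dicts["current_spends"][inp_col])

channel_bounds_max = 100 appears to set the matching upper limit on the same percentage scale, so the Scenario Planner cannot scale a channel below its fitted minimum spend.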
utilities_with_panel.py
CHANGED
@@ -98,7 +98,7 @@ DATA_PATH = './data'
98 |
99 | IMAGES_PATH = './data/images_224_224'
100 |
101 | - # New - S# print 2
102 | if 'bin_dict' not in st.session_state:
103 |
104 | with open("data_import.pkl", "rb") as f:

@@ -395,7 +395,7 @@ def initialize_data(target_col,selected_markets):
395 | # uopx_conv_rates = {'streaming_impressions' : 0.007,'digital_impressions' : 0.007,'search_clicks' : 0.00719,'tv_impressions' : 0.000173,
396 | # "digital_clicks":0.005,"streaming_clicks":0.004,'streaming_spends':1,"tv_spends":1,"search_spends":1,
397 | # "digital_spends":1}
398 | - ## print('State initialized')
399 | # excel = pd.read_excel("data_test_overview_panel.xlsx",sheet_name=None)
400 | #excel = pd.read_excel("Overview_data_test_panel@#revenue.xlsx" + target_col + ".xlsx",sheet_name=None)
401 |

@@ -469,7 +469,7 @@ def initialize_data(target_col,selected_markets):
469 | for inp_col in channel_list:
470 | #st.write(inp_col)
471 |
472 | - # # New - S# print 2
473 | # if is_panel:
474 | # input_df1 = input_df.groupby([date_col]).agg({inp_col:'sum'}).reset_index() # aggregate spends on date
475 | # spends = input_df1[inp_col].values

@@ -484,7 +484,7 @@ def initialize_data(target_col,selected_markets):
484 |
485 |
486 | # contribution
487 | - # New - S# print 2
488 | out_col = [_col for _col in output_df.columns if _col.startswith(inp_col)][0]
489 | if is_panel :
490 | output_df1 = output_df.groupby([date_col]).agg({out_col:'sum'}).reset_index()

@@ -505,12 +505,12 @@ def initialize_data(target_col,selected_markets):
505 |
506 | x = x.astype('float64')
507 | y = y.astype('float64')
508 | - ## print('## printing yyyyyyyyy')
509 | - ## print(inp_col)
510 | - ## print(x.max())
511 | - ## print(y.max())
512 | # st.write(y.max(),x.max())
513 | - # print(y.max(),x.max())
514 | if y.max()<=0.01:
515 | if x.max()<=0.01 :
516 | st.write("here-here")

@@ -539,15 +539,15 @@ def initialize_data(target_col,selected_markets):
539 | ## conversion rates
540 | spend_col = [_col for _col in spend_df.columns if _col.startswith(inp_col.rsplit('_',1)[0])][0]
541 |
542 | - ## print('## printing spendssss')
543 | - ## print(spend_col)
544 | conv = (spend_df.set_index('Week')[spend_col] / input_df.set_index('Date')[inp_col].clip(lower=1)).reset_index()
545 | conv.rename(columns={'index':'Week'},inplace=True)
546 | conv['year'] = conv.Week.dt.year
547 | conv_rates[inp_col] = list(conv.drop('Week',axis=1).mean().to_dict().values())[0]
548 | - ### print('Before',conv_rates[inp_col])
549 | # conv_rates[inp_col] = uopx_conv_rates[inp_col]
550 | - ### print('After',(conv_rates[inp_col]))
551 |
552 |
553 | channel = Channel(name=inp_col,dates=dates,

@@ -617,7 +617,7 @@ def initialize_data(target_col,selected_markets):
617 | # channel_list = []
618 | # for col in raw_df.columns:
619 | # if 'click' in col.lower() or 'spend' in col.lower() or 'imp' in col.lower():
620 | - # ### print(col)
621 | # channel_list.append(col)
622 | # else:
623 | # pass

@@ -708,7 +708,7 @@ def create_channel_summary(scenario):
708 | if name_mod.lower().endswith(' imp'):
709 | name_mod = name_mod.replace('Imp', ' Impressions')
710 |
711 | - # print(name_mod, channel.actual_total_spends, channel.conversion_rate,
712 | channel.actual_total_spends * channel.conversion_rate
713 |
714 | summary_columns.append(name_mod)

98 |
99 | IMAGES_PATH = './data/images_224_224'
100 |
101 | + # New - S# # print 2
102 | if 'bin_dict' not in st.session_state:
103 |
104 | with open("data_import.pkl", "rb") as f:

395 | # uopx_conv_rates = {'streaming_impressions' : 0.007,'digital_impressions' : 0.007,'search_clicks' : 0.00719,'tv_impressions' : 0.000173,
396 | # "digital_clicks":0.005,"streaming_clicks":0.004,'streaming_spends':1,"tv_spends":1,"search_spends":1,
397 | # "digital_spends":1}
398 | + ## # print('State initialized')
399 | # excel = pd.read_excel("data_test_overview_panel.xlsx",sheet_name=None)
400 | #excel = pd.read_excel("Overview_data_test_panel@#revenue.xlsx" + target_col + ".xlsx",sheet_name=None)
401 |

469 | for inp_col in channel_list:
470 | #st.write(inp_col)
471 |
472 | + # # New - S# # print 2
473 | # if is_panel:
474 | # input_df1 = input_df.groupby([date_col]).agg({inp_col:'sum'}).reset_index() # aggregate spends on date
475 | # spends = input_df1[inp_col].values

484 |
485 |
486 | # contribution
487 | + # New - S# # print 2
488 | out_col = [_col for _col in output_df.columns if _col.startswith(inp_col)][0]
489 | if is_panel :
490 | output_df1 = output_df.groupby([date_col]).agg({out_col:'sum'}).reset_index()

505 |
506 | x = x.astype('float64')
507 | y = y.astype('float64')
508 | + ## # print('## # printing yyyyyyyyy')
509 | + ## # print(inp_col)
510 | + ## # print(x.max())
511 | + ## # print(y.max())
512 | # st.write(y.max(),x.max())
513 | + # # print(y.max(),x.max())
514 | if y.max()<=0.01:
515 | if x.max()<=0.01 :
516 | st.write("here-here")

539 | ## conversion rates
540 | spend_col = [_col for _col in spend_df.columns if _col.startswith(inp_col.rsplit('_',1)[0])][0]
541 |
542 | + ## # print('## # printing spendssss')
543 | + ## # print(spend_col)
544 | conv = (spend_df.set_index('Week')[spend_col] / input_df.set_index('Date')[inp_col].clip(lower=1)).reset_index()
545 | conv.rename(columns={'index':'Week'},inplace=True)
546 | conv['year'] = conv.Week.dt.year
547 | conv_rates[inp_col] = list(conv.drop('Week',axis=1).mean().to_dict().values())[0]
548 | + ### # print('Before',conv_rates[inp_col])
549 | # conv_rates[inp_col] = uopx_conv_rates[inp_col]
550 | + ### # print('After',(conv_rates[inp_col]))
551 |
552 |
553 | channel = Channel(name=inp_col,dates=dates,

617 | # channel_list = []
618 | # for col in raw_df.columns:
619 | # if 'click' in col.lower() or 'spend' in col.lower() or 'imp' in col.lower():
620 | + # ### # print(col)
621 | # channel_list.append(col)
622 | # else:
623 | # pass

708 | if name_mod.lower().endswith(' imp'):
709 | name_mod = name_mod.replace('Imp', ' Impressions')
710 |
711 | + # # print(name_mod, channel.actual_total_spends, channel.conversion_rate,
712 | channel.actual_total_spends * channel.conversion_rate
713 |
714 | summary_columns.append(name_mod)
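utilities_with_panel.py is the panel-aware variant of the same initialization: per-market rows are summed per date (the groupby pattern commented around lines 473-475 and used at line 490) before spends, contributions, and conversion rates are computed on a single weekly series, and the retained y.max() <= 0.01 / x.max() <= 0.01 guard flags channels whose aggregated spends and contributions are both effectively zero. A minimal sketch of that aggregation step, with a hypothetical toy frame (the column names below are assumptions, not the app's data):

import pandas as pd

def aggregate_panel(df: pd.DataFrame, date_col: str, value_col: str) -> pd.DataFrame:
    # Sum per-panel (e.g. per-market) values for each date so downstream fitting sees
    # one national weekly series, mirroring
    # output_df.groupby([date_col]).agg({out_col: 'sum'}).reset_index().
    return df.groupby([date_col]).agg({value_col: "sum"}).reset_index()

panel_df = pd.DataFrame({
    "Date": pd.to_datetime(["2024-01-01", "2024-01-01", "2024-01-08"]),
    "dma": ["A", "B", "A"],
    "tv_spends": [100.0, 50.0, 80.0],
})
weekly = aggregate_panel(panel_df, "Date", "tv_spends")
# -> 2024-01-01: 150.0, 2024-01-08: 80.0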