import streamlit as st
import numpy as np
import pandas as pd
import time
import math
from difflib import SequenceMatcher
def calculate_weighted_ownership_vectorized(ownership_array):
    """
    Vectorized version of calculate_weighted_ownership using NumPy operations.

    Args:
        ownership_array: 2D array of ownership values (rows x players)

    Returns:
        array: Calculated weighted ownership values for each row
    """
    # Convert percentages to decimals and handle NaN values
    ownership_array = np.where(np.isnan(ownership_array), 0, ownership_array) / 100

    # Calculate row means
    row_means = np.mean(ownership_array, axis=1, keepdims=True)

    # Calculate average of each value with the overall mean
    value_means = (ownership_array + row_means) / 2

    # Take average of all those means
    avg_of_means = np.mean(value_means, axis=1)

    # Multiply by count of values
    weighted = avg_of_means * ownership_array.shape[1]

    # Subtract (max - min) for each row
    row_max = np.max(ownership_array, axis=1)
    row_min = np.min(ownership_array, axis=1)
    weighted = weighted - (row_max - row_min)

    # Convert back to percentage form
    return weighted * 10000
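
# Minimal illustrative sketch (not part of the app flow): the vectorized helper can be
# called directly on a small ownership matrix in percentage form, yielding one weighted
# value per row. The example array below is made up.
# example_own = np.array([[20.0, 15.0, 5.0],
#                         [30.0, 10.0, 2.0]])
# calculate_weighted_ownership_vectorized(example_own)
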
def calculate_weighted_ownership_wrapper(row_ownerships):
    """
    Wrapper around calculate_weighted_ownership_vectorized for use with pandas .apply().

    Args:
        row_ownerships: Series containing ownership values in percentage form

    Returns:
        float: Calculated weighted ownership value
    """
    # Convert the Series to a 2D array for the vectorized function
    ownership_array = row_ownerships.values.reshape(1, -1)
    return calculate_weighted_ownership_vectorized(ownership_array)[0]
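
# Illustrative usage sketch, mirroring how predict_dupes applies the wrapper further down;
# own_cols here is a hypothetical subset of the per-slot ownership columns.
# own_cols = ['FLEX1_Own', 'FLEX2_Own', 'FLEX3_Own']
# portfolio['Weighted Own'] = portfolio[own_cols].apply(calculate_weighted_ownership_wrapper, axis=1)
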
def calculate_player_similarity_score_vectorized(portfolio, player_columns):
    """
    Vectorized version of calculate_player_similarity_score using NumPy operations.
    """
    # Extract player data as strings (fill NaNs before casting so they become empty strings)
    player_data = portfolio[player_columns].fillna('').astype(str).values

    # Get all unique players and create a mapping to numeric IDs
    all_players = set()
    for row in player_data:
        for val in row:
            if isinstance(val, str) and val.strip() != '':
                all_players.add(val)

    # Create player ID mapping
    player_to_id = {player: idx for idx, player in enumerate(sorted(all_players))}

    # Convert each row to a binary vector (1 if player is present, 0 if not)
    n_players = len(all_players)
    n_rows = len(portfolio)
    binary_matrix = np.zeros((n_rows, n_players), dtype=np.int8)

    # Build the binary matrix row by row
    for i, row in enumerate(player_data):
        for val in row:
            if isinstance(val, str) and val.strip() != '' and val in player_to_id:
                binary_matrix[i, player_to_id[val]] = 1

    # Vectorized Jaccard distance calculation
    intersection_matrix = np.dot(binary_matrix, binary_matrix.T)
    row_sums = np.sum(binary_matrix, axis=1)
    union_matrix = row_sums[:, np.newaxis] + row_sums - intersection_matrix

    # Calculate Jaccard distance: 1 - (intersection / union)
    with np.errstate(divide='ignore', invalid='ignore'):
        jaccard_similarity = np.divide(intersection_matrix, union_matrix,
                                       out=np.zeros_like(intersection_matrix, dtype=float),
                                       where=union_matrix != 0)
    jaccard_distance = 1 - jaccard_similarity

    # Exclude self-comparison and calculate the average distance for each row
    # (guard the divisor so a single-lineup portfolio does not divide by zero)
    np.fill_diagonal(jaccard_distance, 0)
    row_counts = max(n_rows - 1, 1)
    similarity_scores = np.sum(jaccard_distance, axis=1) / row_counts

    # Normalize to 0-1 scale
    score_range = similarity_scores.max() - similarity_scores.min()
    if score_range > 0:
        similarity_scores = (similarity_scores - similarity_scores.min()) / score_range

    return similarity_scores
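
# Illustrative usage sketch, mirroring the call inside predict_dupes: pass the lineup
# DataFrame plus the list of player-name columns and get one 0-1 diversity score per row.
# portfolio['Diversity'] = calculate_player_similarity_score_vectorized(portfolio, player_columns)
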
# Keep the original function for backward compatibility
def predict_dupes(portfolio, maps_dict, site_var, type_var, Contest_Size, strength_var, sport_var, max_salary):
    if strength_var == 'Weak':
        dupes_multiplier = .75
        percentile_multiplier = .90
    elif strength_var == 'Average':
        dupes_multiplier = 1.00
        percentile_multiplier = 1.00
    elif strength_var == 'Sharp':
        dupes_multiplier = 1.25
        percentile_multiplier = 1.10
    else:
        # Fallback (assumption): treat any unrecognized strength setting like 'Average'
        dupes_multiplier = 1.00
        percentile_multiplier = 1.00

    if sport_var == 'NFL':
        own_baseline = 180
    else:
        own_baseline = 120

    max_ownership = max(maps_dict['own_map'].values()) / 100
    average_ownership = np.mean(list(maps_dict['own_map'].values())) / 100
    if type_var == 'Showdown':
        if sport_var == 'GOLF':
            dup_count_columns = ['FLEX1_Own_percent_rank', 'FLEX2_Own_percent_rank', 'FLEX3_Own_percent_rank', 'FLEX4_Own_percent_rank', 'FLEX5_Own_percent_rank', 'FLEX6_Own_percent_rank']
            own_columns = ['FLEX1_Own', 'FLEX2_Own', 'FLEX3_Own', 'FLEX4_Own', 'FLEX5_Own', 'FLEX6_Own']
        else:
            dup_count_columns = ['CPT_Own_percent_rank', 'FLEX1_Own_percent_rank', 'FLEX2_Own_percent_rank', 'FLEX3_Own_percent_rank', 'FLEX4_Own_percent_rank', 'FLEX5_Own_percent_rank']
            own_columns = ['CPT_Own', 'FLEX1_Own', 'FLEX2_Own', 'FLEX3_Own', 'FLEX4_Own', 'FLEX5_Own']
        calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']

        # Get the original player columns (first 6 columns excluding salary, median, Own)
        player_columns = [col for col in portfolio.columns[:6] if col not in ['salary', 'median', 'Own']]
        n_rows = len(portfolio)

        # Assign ranks back to individual columns using the same rank scale
        if sport_var == 'GOLF':
            flex_ownerships = pd.concat([
                portfolio.iloc[:,1].map(maps_dict['own_map']),
                portfolio.iloc[:,2].map(maps_dict['own_map']),
                portfolio.iloc[:,3].map(maps_dict['own_map']),
                portfolio.iloc[:,4].map(maps_dict['own_map']),
                portfolio.iloc[:,5].map(maps_dict['own_map']),
                portfolio.iloc[:,6].map(maps_dict['own_map'])
            ])
            flex_rank = flex_ownerships.rank(pct=True)
            portfolio['FLEX1_Own_percent_rank'] = flex_rank.iloc[0:n_rows].values
            portfolio['FLEX2_Own_percent_rank'] = flex_rank.iloc[n_rows:2*n_rows].values
            portfolio['FLEX3_Own_percent_rank'] = flex_rank.iloc[2*n_rows:3*n_rows].values
            portfolio['FLEX4_Own_percent_rank'] = flex_rank.iloc[3*n_rows:4*n_rows].values
            portfolio['FLEX5_Own_percent_rank'] = flex_rank.iloc[4*n_rows:5*n_rows].values
            portfolio['FLEX6_Own_percent_rank'] = flex_rank.iloc[5*n_rows:6*n_rows].values
            portfolio['FLEX1_Own'] = portfolio.iloc[:,0].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX2_Own'] = portfolio.iloc[:,1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX3_Own'] = portfolio.iloc[:,2].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX4_Own'] = portfolio.iloc[:,3].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX5_Own'] = portfolio.iloc[:,4].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX6_Own'] = portfolio.iloc[:,5].map(maps_dict['own_map']).astype('float32') / 100
        else:
            flex_ownerships = pd.concat([
                portfolio.iloc[:,1].map(maps_dict['own_map']),
                portfolio.iloc[:,2].map(maps_dict['own_map']),
                portfolio.iloc[:,3].map(maps_dict['own_map']),
                portfolio.iloc[:,4].map(maps_dict['own_map']),
                portfolio.iloc[:,5].map(maps_dict['own_map'])
            ])
            flex_rank = flex_ownerships.rank(pct=True)
            portfolio['CPT_Own_percent_rank'] = portfolio.iloc[:,0].map(maps_dict['cpt_own_map']).rank(pct=True)
            portfolio['FLEX1_Own_percent_rank'] = flex_rank.iloc[0:n_rows].values
            portfolio['FLEX2_Own_percent_rank'] = flex_rank.iloc[n_rows:2*n_rows].values
            portfolio['FLEX3_Own_percent_rank'] = flex_rank.iloc[2*n_rows:3*n_rows].values
            portfolio['FLEX4_Own_percent_rank'] = flex_rank.iloc[3*n_rows:4*n_rows].values
            portfolio['FLEX5_Own_percent_rank'] = flex_rank.iloc[4*n_rows:5*n_rows].values
            portfolio['CPT_Own'] = portfolio.iloc[:,0].map(maps_dict['cpt_own_map']).astype('float32') / 100
            portfolio['FLEX1_Own'] = portfolio.iloc[:,1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX2_Own'] = portfolio.iloc[:,2].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX3_Own'] = portfolio.iloc[:,3].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX4_Own'] = portfolio.iloc[:,4].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX5_Own'] = portfolio.iloc[:,5].map(maps_dict['own_map']).astype('float32') / 100

        portfolio['own_product'] = (portfolio[own_columns].product(axis=1))
        portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
        portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
        portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)

        # Calculate dupes formula (in progress still)
        portfolio['dupes_calc'] = ((portfolio['own_product'] + ((portfolio['CPT_Own_percent_rank'] - .50) / 1000) + ((portfolio['Own'] / 6) / (max_salary / 2))) * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 100) - ((max_salary - portfolio['salary']) / 100)
        portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (own_baseline + (Contest_Size / 1000)))
        portfolio['dupes_calc'] = ((((portfolio['salary'] / (max_salary * 0.96)) - 1) * (max_salary / 10000)) + 1) * portfolio['dupes_calc']
        portfolio['dupes_calc'] = portfolio['dupes_calc'] * ((portfolio['CPT_Own_percent_rank'] + .50) / (portfolio['Own'] / 90))

        # Round and handle negative values
        portfolio['Dupes'] = np.where(
            portfolio['salary'] == max_salary,
            portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
            portfolio['dupes_calc']
        )
        portfolio['Dupes'] = np.where(
            np.round(portfolio['Dupes'], 0) <= 0,
            0,
            np.round(portfolio['Dupes'], 0) - 1
        )

        print(portfolio['own_product'])
        print(portfolio['avg_own_rank'])
        print(portfolio['salary'])
        print(portfolio['Own'])
        print(portfolio['dupes_calc'])
        print(portfolio['Dupes'])
    elif type_var == 'Classic':
        if sport_var == 'CS2':
            dup_count_columns = ['CPT_Own_percent_rank', 'FLEX1_Own_percent_rank', 'FLEX2_Own_percent_rank', 'FLEX3_Own_percent_rank', 'FLEX4_Own_percent_rank', 'FLEX5_Own_percent_rank']
            own_columns = ['CPT_Own', 'FLEX1_Own', 'FLEX2_Own', 'FLEX3_Own', 'FLEX4_Own', 'FLEX5_Own']
            calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']

            # Get the original player columns (first 6 columns excluding salary, median, Own)
            player_columns = [col for col in portfolio.columns[:6] if col not in ['salary', 'median', 'Own']]
            n_rows = len(portfolio)
            flex_ownerships = pd.concat([
                portfolio.iloc[:,1].map(maps_dict['own_map']),
                portfolio.iloc[:,2].map(maps_dict['own_map']),
                portfolio.iloc[:,3].map(maps_dict['own_map']),
                portfolio.iloc[:,4].map(maps_dict['own_map']),
                portfolio.iloc[:,5].map(maps_dict['own_map'])
            ])
            flex_rank = flex_ownerships.rank(pct=True)

            # Assign ranks back to individual columns using the same rank scale
            portfolio['CPT_Own_percent_rank'] = portfolio.iloc[:,0].map(maps_dict['cpt_own_map']).rank(pct=True)
            portfolio['FLEX1_Own_percent_rank'] = flex_rank.iloc[0:n_rows].values
            portfolio['FLEX2_Own_percent_rank'] = flex_rank.iloc[n_rows:2*n_rows].values
            portfolio['FLEX3_Own_percent_rank'] = flex_rank.iloc[2*n_rows:3*n_rows].values
            portfolio['FLEX4_Own_percent_rank'] = flex_rank.iloc[3*n_rows:4*n_rows].values
            portfolio['FLEX5_Own_percent_rank'] = flex_rank.iloc[4*n_rows:5*n_rows].values
            portfolio['CPT_Own'] = portfolio.iloc[:,0].map(maps_dict['cpt_own_map']).astype('float32') / 100
            portfolio['FLEX1_Own'] = portfolio.iloc[:,1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX2_Own'] = portfolio.iloc[:,2].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX3_Own'] = portfolio.iloc[:,3].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX4_Own'] = portfolio.iloc[:,4].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX5_Own'] = portfolio.iloc[:,5].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['own_product'] = (portfolio[own_columns].product(axis=1)) * max(Contest_Size / 10000, 1)
            portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
            portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
            portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)

            # Calculate dupes formula
            portfolio['dupes_calc'] = ((portfolio['own_product'] * 10) * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 50) - ((max_salary - portfolio['salary']) / 50)
            portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (90 + (Contest_Size / 1000)))

            # Round and handle negative values
            portfolio['Dupes'] = np.where(
                portfolio['salary'] == max_salary,
                portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
                portfolio['dupes_calc']
            )
            portfolio['Dupes'] = np.where(
                np.round(portfolio['Dupes'], 0) <= 0,
                0,
                np.round(portfolio['Dupes'], 0) - 1
            )
        elif sport_var == 'LOL':
            dup_count_columns = ['CPT_Own_percent_rank', 'TOP_Own_percent_rank', 'JNG_Own_percent_rank', 'MID_Own_percent_rank', 'ADC_Own_percent_rank', 'SUP_Own_percent_rank', 'Team_Own_percent_rank']
            own_columns = ['CPT_Own', 'TOP_Own', 'JNG_Own', 'MID_Own', 'ADC_Own', 'SUP_Own', 'Team_Own']
            calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']

            # Get the original player columns (first 7 columns excluding salary, median, Own)
            player_columns = [col for col in portfolio.columns[:7] if col not in ['salary', 'median', 'Own']]
            n_rows = len(portfolio)
            flex_ownerships = pd.concat([
                portfolio.iloc[:,1].map(maps_dict['own_map']),
                portfolio.iloc[:,2].map(maps_dict['own_map']),
                portfolio.iloc[:,3].map(maps_dict['own_map']),
                portfolio.iloc[:,4].map(maps_dict['own_map']),
                portfolio.iloc[:,5].map(maps_dict['own_map']),
                portfolio.iloc[:,6].map(maps_dict['own_map'])
            ])
            flex_rank = flex_ownerships.rank(pct=True)

            # Assign ranks back to individual columns using the same rank scale
            portfolio['CPT_Own_percent_rank'] = portfolio.iloc[:,0].map(maps_dict['cpt_own_map']).rank(pct=True)
            portfolio['TOP_Own_percent_rank'] = flex_rank.iloc[0:n_rows].values
            portfolio['JNG_Own_percent_rank'] = flex_rank.iloc[n_rows:2*n_rows].values
            portfolio['MID_Own_percent_rank'] = flex_rank.iloc[2*n_rows:3*n_rows].values
            portfolio['ADC_Own_percent_rank'] = flex_rank.iloc[3*n_rows:4*n_rows].values
            portfolio['SUP_Own_percent_rank'] = flex_rank.iloc[4*n_rows:5*n_rows].values
            portfolio['Team_Own_percent_rank'] = flex_rank.iloc[5*n_rows:6*n_rows].values
            portfolio['CPT_Own'] = portfolio.iloc[:,0].map(maps_dict['cpt_own_map']).astype('float32') / 100
            portfolio['TOP_Own'] = portfolio.iloc[:,1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['JNG_Own'] = portfolio.iloc[:,2].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['MID_Own'] = portfolio.iloc[:,3].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['ADC_Own'] = portfolio.iloc[:,4].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['SUP_Own'] = portfolio.iloc[:,5].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['Team_Own'] = portfolio.iloc[:,6].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['own_product'] = (portfolio[own_columns].product(axis=1)) * max(Contest_Size / 10000, 1)
            portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
            portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
            portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)

            # Calculate dupes formula
            portfolio['dupes_calc'] = ((portfolio['own_product'] * 10) * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 50) - ((max_salary - portfolio['salary']) / 50)
            portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (90 + (Contest_Size / 1000)))

            # Round and handle negative values
            portfolio['Dupes'] = np.where(
                portfolio['salary'] == max_salary,
                portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
                portfolio['dupes_calc']
            )
            portfolio['Dupes'] = np.where(
                np.round(portfolio['Dupes'], 0) <= 0,
                0,
                np.round(portfolio['Dupes'], 0) - 1
            )
        elif sport_var == 'GOLF':
            num_players = len([col for col in portfolio.columns if col not in ['salary', 'median', 'Own']])
            dup_count_columns = [f'player_{i}_percent_rank' for i in range(1, num_players + 1)]
            own_columns = [f'player_{i}_own' for i in range(1, num_players + 1)]
            calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']

            # Get the original player columns (first num_players columns excluding salary, median, Own)
            player_columns = [col for col in portfolio.columns[:num_players] if col not in ['salary', 'median', 'Own']]
            for i in range(1, num_players + 1):
                portfolio[f'player_{i}_percent_rank'] = portfolio.iloc[:,i-1].map(maps_dict['own_percent_rank'])
                portfolio[f'player_{i}_own'] = portfolio.iloc[:,i-1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['own_product'] = (portfolio[own_columns].product(axis=1)) * max(Contest_Size / 10000, 1)
            portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
            portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
            portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)
            portfolio['dupes_calc'] = (portfolio['own_product'] * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 100) - ((max_salary - portfolio['salary']) / 100)
            portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (90 + (Contest_Size / 1000)))

            # Round and handle negative values
            portfolio['Dupes'] = np.where(
                portfolio['salary'] == max_salary,
                portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
                portfolio['dupes_calc']
            )
            portfolio['Dupes'] = np.where(
                np.round(portfolio['Dupes'], 0) <= 0,
                0,
                np.round(portfolio['Dupes'], 0) - 1
            )
        else:
            num_players = len([col for col in portfolio.columns if col not in ['salary', 'median', 'Own']])
            dup_count_columns = [f'player_{i}_percent_rank' for i in range(1, num_players + 1)]
            own_columns = [f'player_{i}_own' for i in range(1, num_players + 1)]
            calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']

            # Get the original player columns (first num_players columns excluding salary, median, Own)
            player_columns = [col for col in portfolio.columns[:num_players] if col not in ['salary', 'median', 'Own']]
            for i in range(1, num_players + 1):
                portfolio[f'player_{i}_percent_rank'] = portfolio.iloc[:,i-1].map(maps_dict['own_percent_rank'])
                portfolio[f'player_{i}_own'] = portfolio.iloc[:,i-1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['own_product'] = (portfolio[own_columns].product(axis=1))
            portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
            portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
            portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)
            portfolio['dupes_calc'] = (portfolio['own_product'] * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 100) - ((max_salary - portfolio['salary']) / 100)
            portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (90 + (Contest_Size / 1000)))

            # Round and handle negative values
            portfolio['Dupes'] = np.where(
                portfolio['salary'] == max_salary,
                portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
                portfolio['dupes_calc']
            )
            portfolio['Dupes'] = np.where(
                np.round(portfolio['Dupes'], 0) <= 0,
                0,
                np.round(portfolio['Dupes'], 0) - 1
            )
    portfolio['Dupes'] = np.round(portfolio['Dupes'], 0)
    portfolio['own_ratio'] = np.where(
        portfolio[own_columns].isin([max_ownership]).any(axis=1),
        portfolio['own_sum'] / portfolio['own_average'],
        (portfolio['own_sum'] - max_ownership) / portfolio['own_average']
    )
    percentile_cut_scalar = portfolio['median'].max()

    if type_var == 'Classic':
        if sport_var in ('CS2', 'LOL'):
            own_ratio_nerf = 2
        else:
            own_ratio_nerf = 1.5
    elif type_var == 'Showdown':
        own_ratio_nerf = 1.5

    portfolio['Finish_percentile'] = portfolio.apply(
        lambda row: .0005 if (row['own_ratio'] - own_ratio_nerf) / ((5 * (row['median'] / percentile_cut_scalar)) / 3) < .0005
        else ((row['own_ratio'] - own_ratio_nerf) / ((5 * (row['median'] / percentile_cut_scalar)) / 3)) / 2,
        axis=1
    )
    portfolio['Ref_Proj'] = portfolio['median'].max()
    portfolio['Max_Proj'] = portfolio['Ref_Proj'] + 10
    portfolio['Min_Proj'] = portfolio['Ref_Proj'] - 10
    portfolio['Avg_Ref'] = (portfolio['Max_Proj'] + portfolio['Min_Proj']) / 2
    portfolio['Win%'] = (((portfolio['median'] / portfolio['Avg_Ref']) - (0.1 + ((portfolio['Ref_Proj'] - portfolio['median'])/100))) / (Contest_Size / 1000)) / 10
    max_allowed_win = (1 / Contest_Size) * 5
    portfolio['Win%'] = portfolio['Win%'] / portfolio['Win%'].max() * max_allowed_win

    portfolio['Finish_percentile'] = portfolio['Finish_percentile'] + .005 + (.005 * (Contest_Size / 10000))
    portfolio['Finish_percentile'] = portfolio['Finish_percentile'] * percentile_multiplier * (portfolio['Own'] / (100 + (Contest_Size / 1000)))
    portfolio['Win%'] = portfolio['Win%'] * (1 - portfolio['Finish_percentile'])
    portfolio['Win%'] = portfolio['Win%'].clip(lower=0, upper=max_allowed_win)

    portfolio['low_own_count'] = portfolio[own_columns].apply(lambda row: (row < 0.10).sum(), axis=1)
    portfolio['Finish_percentile'] = portfolio.apply(lambda row: row['Finish_percentile'] if row['low_own_count'] <= 0 else row['Finish_percentile'] / row['low_own_count'], axis=1)
    portfolio['Lineup Edge'] = portfolio['Win%'] * ((.5 - portfolio['Finish_percentile']) * (Contest_Size / 2.5))
    portfolio['Lineup Edge'] = portfolio.apply(lambda row: row['Lineup Edge'] / (row['Dupes'] + 1) if row['Dupes'] > 0 else row['Lineup Edge'], axis=1)
    portfolio['Lineup Edge'] = portfolio['Lineup Edge'] - portfolio['Lineup Edge'].mean()
    portfolio['Weighted Own'] = portfolio[own_columns].apply(calculate_weighted_ownership_wrapper, axis=1)
    portfolio['Geomean'] = np.power((portfolio[own_columns] * 100).product(axis=1), 1 / len(own_columns))

    # Calculate similarity score based on actual player selection
    portfolio['Diversity'] = calculate_player_similarity_score_vectorized(portfolio, player_columns)

    check_portfolio = portfolio.copy()
    portfolio = portfolio.drop(columns=dup_count_columns)
    portfolio = portfolio.drop(columns=own_columns)
    portfolio = portfolio.drop(columns=calc_columns)

    int16_columns_stacks = ['Dupes', 'Size', 'salary']
    int16_columns_nstacks = ['Dupes', 'salary']
    float32_columns = ['median', 'Own', 'Finish_percentile', 'Win%', 'Lineup Edge', 'Weighted Own', 'Geomean', 'Diversity']

    print(portfolio.columns)
    print(portfolio.head(10))
    try:
        portfolio[int16_columns_stacks] = portfolio[int16_columns_stacks].astype('uint16')
    except Exception:
        pass
    try:
        portfolio[int16_columns_nstacks] = portfolio[int16_columns_nstacks].astype('uint16')
    except Exception:
        pass
    if sport_var != 'LOL':
        try:
            portfolio[float32_columns] = portfolio[float32_columns].astype('float32')
        except Exception:
            pass

    # Return the trimmed portfolio along with the pre-drop copy as a check frame
    return portfolio, check_portfolio
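
# Illustrative call-site sketch (argument values are hypothetical; the surrounding Streamlit
# app builds maps_dict with 'own_map', 'cpt_own_map' and 'own_percent_rank' lookups elsewhere):
# portfolio, check_portfolio = predict_dupes(
#     portfolio, maps_dict, site_var='draftkings', type_var='Classic', Contest_Size=10000,
#     strength_var='Average', sport_var='NFL', max_salary=50000)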