James McCool
Enhance Lineup Edge calculation in predict_dupes.py by normalizing the edge values based on the maximum and minimum edges. This adjustment improves the scaling of the Lineup Edge, leading to more accurate portfolio predictions.
7928ec7
import streamlit as st
import numpy as np
import pandas as pd
import time
import math
from difflib import SequenceMatcher
import scipy.stats

def calculate_weighted_ownership_vectorized(ownership_array):
    """
    Vectorized version of calculate_weighted_ownership using NumPy operations.

    Args:
        ownership_array: 2D array of ownership values (rows x players)

    Returns:
        array: Calculated weighted ownership values for each row
    """
    # Convert percentages to decimals and handle NaN values
    ownership_array = np.where(np.isnan(ownership_array), 0, ownership_array) / 100
    # Calculate row means
    row_means = np.mean(ownership_array, axis=1, keepdims=True)
    # Average each value with its row mean
    value_means = (ownership_array + row_means) / 2
    # Take the average of all those means
    avg_of_means = np.mean(value_means, axis=1)
    # Multiply by the count of values
    weighted = avg_of_means * ownership_array.shape[1]
    # Subtract (max - min) for each row
    row_max = np.max(ownership_array, axis=1)
    row_min = np.min(ownership_array, axis=1)
    weighted = weighted - (row_max - row_min)
    # Scale the decimal-form result back up (x 10000)
    return weighted * 10000
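
# Illustrative sketch (not part of the original module): averaging each value
# with the row mean and then averaging again just reproduces the row mean, so
# the arithmetic above reduces to (sum - (max - min)) on the decimal scale.
# For a single row of 20%, 15%, and 5% ownership that is (0.40 - 0.15) * 10000:
#
#   demo = np.array([[20.0, 15.0, 5.0]])
#   calculate_weighted_ownership_vectorized(demo)  # -> array([2500.])
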
def calculate_flex_ranks_efficient(portfolio, start_col, end_col, maps_dict, map_key='own_map'):
    """Memory-efficient replacement for pd.concat + rank operations"""
    n_rows = len(portfolio)
    n_cols = end_col - start_col
    # Pre-allocate one flat array holding every column's mapped values
    all_values = np.zeros(n_rows * n_cols, dtype=np.float32)
    # Fill values column by column
    for i, col_idx in enumerate(range(start_col, end_col)):
        start_idx = i * n_rows
        end_idx = (i + 1) * n_rows
        all_values[start_idx:end_idx] = portfolio.iloc[:, col_idx].map(maps_dict[map_key]).values
    # Calculate percentile ranks over the pooled values
    ranks = scipy.stats.rankdata(all_values, method='average') / len(all_values)
    # Reshape back to individual column ranks
    result_ranks = {}
    for i in range(n_cols):
        start_idx = i * n_rows
        end_idx = (i + 1) * n_rows
        result_ranks[i] = ranks[start_idx:end_idx]
    return result_ranks
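
# Illustrative sketch (hypothetical data): the helper pools every slot into a
# single rank scale, so the same mapped ownership receives the same percentile
# no matter which column it appears in:
#
#   demo = pd.DataFrame({'A': ['p1', 'p2'], 'B': ['p2', 'p1']})
#   demo_maps = {'own_map': {'p1': 30.0, 'p2': 10.0}}
#   calculate_flex_ranks_efficient(demo, 0, 2, demo_maps)
#   # -> {0: array([0.875, 0.375]), 1: array([0.375, 0.875])}
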
def calculate_weighted_ownership_wrapper(row_ownerships):
    """
    Wrapper around calculate_weighted_ownership_vectorized for use with Pandas .apply()

    Args:
        row_ownerships: Series containing ownership values in percentage form

    Returns:
        float: Calculated weighted ownership value
    """
    # Convert the Series to a 1-row 2D array for the vectorized function
    ownership_array = row_ownerships.values.reshape(1, -1)
    return calculate_weighted_ownership_vectorized(ownership_array)[0]

def calculate_player_similarity_score_chunked(portfolio, player_columns, chunk_size=1000):
    """
    Memory-efficient version that processes similarities in chunks
    """
    # Build the player-ID mapping and a binary lineup matrix
    # (fillna must run before astype, otherwise NaN becomes the literal string 'nan')
    player_data = portfolio[player_columns].fillna('').astype(str).values
    all_players = set()
    for row in player_data:
        for val in row:
            if val.strip() != '':
                all_players.add(val)
    player_to_id = {player: idx for idx, player in enumerate(sorted(all_players))}
    n_players = len(all_players)
    n_rows = len(portfolio)
    binary_matrix = np.zeros((n_rows, n_players), dtype=np.int8)
    for i, row in enumerate(player_data):
        for val in row:
            if val.strip() != '' and val in player_to_id:
                binary_matrix[i, player_to_id[val]] = 1
    # Process similarities in chunks to avoid massive matrices
    similarity_scores = np.zeros(n_rows)
    for i in range(0, n_rows, chunk_size):
        end_i = min(i + chunk_size, n_rows)
        chunk_binary = binary_matrix[i:end_i]
        # Calculate similarities for this chunk only
        intersection = np.dot(chunk_binary, binary_matrix.T)
        chunk_row_sums = np.sum(chunk_binary, axis=1)
        all_row_sums = np.sum(binary_matrix, axis=1)
        union = chunk_row_sums[:, np.newaxis] + all_row_sums - intersection
        with np.errstate(divide='ignore', invalid='ignore'):
            jaccard_sim = np.divide(intersection, union,
                                    out=np.zeros_like(intersection, dtype=float),
                                    where=union != 0)
        jaccard_dist = 1 - jaccard_sim
        # Exclude self-comparison and average each row's distance to all others
        for j in range(len(jaccard_dist)):
            actual_idx = i + j
            jaccard_dist[j, actual_idx] = 0  # Exclude self
        similarity_scores[i:end_i] = np.sum(jaccard_dist, axis=1) / (n_rows - 1)
    # Normalize to [0, 1]
    score_range = similarity_scores.max() - similarity_scores.min()
    if score_range > 0:
        similarity_scores = (similarity_scores - similarity_scores.min()) / score_range
    return similarity_scores
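
# Illustrative sketch (hypothetical data): of three 2-player lineups, the first
# two overlap on player 'a' while the third shares no one, so the third
# normalizes to the maximum diversity score of 1.0:
#
#   demo = pd.DataFrame({'P1': ['a', 'a', 'c'], 'P2': ['b', 'x', 'd']})
#   calculate_player_similarity_score_chunked(demo, ['P1', 'P2'])
#   # -> array([0., 0., 1.])
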
# Keep the original function for backward compatibility
def predict_dupes(portfolio, maps_dict, site_var, type_var, Contest_Size, strength_var, sport_var, max_salary):
    if strength_var == 'Weak':
        dupes_multiplier = .75
        percentile_multiplier = .90
    elif strength_var == 'Average':
        dupes_multiplier = 1.00
        percentile_multiplier = 1.00
    elif strength_var == 'Sharp':
        dupes_multiplier = 1.25
        percentile_multiplier = 1.10
    else:
        # Unrecognized strength settings fall back to the 'Average' weights
        dupes_multiplier = 1.00
        percentile_multiplier = 1.00
    if sport_var == 'NFL':
        own_baseline = 180
    else:
        own_baseline = 120
    max_ownership = max(maps_dict['own_map'].values()) / 100
    average_ownership = np.mean(list(maps_dict['own_map'].values())) / 100
    if type_var == 'Showdown':
        if sport_var == 'GOLF':
            dup_count_columns = ['FLEX1_Own_percent_rank', 'FLEX2_Own_percent_rank', 'FLEX3_Own_percent_rank', 'FLEX4_Own_percent_rank', 'FLEX5_Own_percent_rank', 'FLEX6_Own_percent_rank']
            own_columns = ['FLEX1_Own', 'FLEX2_Own', 'FLEX3_Own', 'FLEX4_Own', 'FLEX5_Own', 'FLEX6_Own']
        else:
            dup_count_columns = ['CPT_Own_percent_rank', 'FLEX1_Own_percent_rank', 'FLEX2_Own_percent_rank', 'FLEX3_Own_percent_rank', 'FLEX4_Own_percent_rank', 'FLEX5_Own_percent_rank']
            own_columns = ['CPT_Own', 'FLEX1_Own', 'FLEX2_Own', 'FLEX3_Own', 'FLEX4_Own', 'FLEX5_Own']
        calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']
        # Get the original player columns (first 6 columns excluding salary, median, Own)
        player_columns = [col for col in portfolio.columns[:6] if col not in ['salary', 'median', 'Own']]
        n_rows = len(portfolio)
        # Assign ranks back to individual columns using the same rank scale
        if sport_var == 'GOLF':
            # GOLF showdowns have no CPT slot: the six FLEX players occupy
            # columns 0-5, matching the .iloc[:, 0]..[:, 5] lookups below,
            # so rank columns 0-6 (exclusive) rather than 1-7
            flex_ranks = calculate_flex_ranks_efficient(portfolio, 0, 6, maps_dict)
            portfolio['FLEX1_Own_percent_rank'] = flex_ranks[0]
            portfolio['FLEX2_Own_percent_rank'] = flex_ranks[1]
            portfolio['FLEX3_Own_percent_rank'] = flex_ranks[2]
            portfolio['FLEX4_Own_percent_rank'] = flex_ranks[3]
            portfolio['FLEX5_Own_percent_rank'] = flex_ranks[4]
            portfolio['FLEX6_Own_percent_rank'] = flex_ranks[5]
            portfolio['FLEX1_Own'] = portfolio.iloc[:, 0].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX2_Own'] = portfolio.iloc[:, 1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX3_Own'] = portfolio.iloc[:, 2].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX4_Own'] = portfolio.iloc[:, 3].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX5_Own'] = portfolio.iloc[:, 4].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX6_Own'] = portfolio.iloc[:, 5].map(maps_dict['own_map']).astype('float32') / 100
        else:
            flex_ranks = calculate_flex_ranks_efficient(portfolio, 1, 6, maps_dict)
            portfolio['CPT_Own_percent_rank'] = portfolio.iloc[:, 0].map(maps_dict['cpt_own_map']).rank(pct=True)
            portfolio['FLEX1_Own_percent_rank'] = flex_ranks[0]
            portfolio['FLEX2_Own_percent_rank'] = flex_ranks[1]
            portfolio['FLEX3_Own_percent_rank'] = flex_ranks[2]
            portfolio['FLEX4_Own_percent_rank'] = flex_ranks[3]
            portfolio['FLEX5_Own_percent_rank'] = flex_ranks[4]
            portfolio['CPT_Own'] = portfolio.iloc[:, 0].map(maps_dict['cpt_own_map']).astype('float32') / 100
            portfolio['FLEX1_Own'] = portfolio.iloc[:, 1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX2_Own'] = portfolio.iloc[:, 2].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX3_Own'] = portfolio.iloc[:, 3].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX4_Own'] = portfolio.iloc[:, 4].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX5_Own'] = portfolio.iloc[:, 5].map(maps_dict['own_map']).astype('float32') / 100
        portfolio['own_product'] = portfolio[own_columns].product(axis=1)
        portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
        portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
        portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)
        # The GOLF branch never creates a CPT rank column; fall back to the
        # average rank there so the shared formula below still evaluates
        if 'CPT_Own_percent_rank' in portfolio.columns:
            cpt_rank = portfolio['CPT_Own_percent_rank']
        else:
            cpt_rank = portfolio['avg_own_rank']
        # Calculate dupes formula (in progress still)
        portfolio['dupes_calc'] = ((portfolio['own_product'] + ((cpt_rank - .50) / 1000) + ((portfolio['Own'] / 6) / (max_salary / 2))) * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 100) - ((max_salary - portfolio['salary']) / 100)
        portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (own_baseline + (Contest_Size / 1000)))
        portfolio['dupes_calc'] = ((((portfolio['salary'] / (max_salary * 0.98)) - 1) * (max_salary / 10000)) + 1) * portfolio['dupes_calc']
        portfolio['dupes_calc'] = portfolio['dupes_calc'] * ((cpt_rank + .50) / (portfolio['Own'] / 110))
        # Round and handle negative values
        portfolio['Dupes'] = np.where(
            portfolio['salary'] == max_salary,
            portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
            portfolio['dupes_calc']
        )
        portfolio['Dupes'] = np.where(
            np.round(portfolio['Dupes'], 0) <= 0,
            0,
            np.round(portfolio['Dupes'], 0) - 1
        )
    elif type_var == 'Classic':
        if sport_var == 'CS2':
            dup_count_columns = ['CPT_Own_percent_rank', 'FLEX1_Own_percent_rank', 'FLEX2_Own_percent_rank', 'FLEX3_Own_percent_rank', 'FLEX4_Own_percent_rank', 'FLEX5_Own_percent_rank']
            own_columns = ['CPT_Own', 'FLEX1_Own', 'FLEX2_Own', 'FLEX3_Own', 'FLEX4_Own', 'FLEX5_Own']
            calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']
            # Get the original player columns (first 6 columns excluding salary, median, Own)
            player_columns = [col for col in portfolio.columns[:6] if col not in ['salary', 'median', 'Own']]
            n_rows = len(portfolio)
            flex_ranks = calculate_flex_ranks_efficient(portfolio, 1, 6, maps_dict)
            # Assign ranks back to individual columns using the same rank scale
            portfolio['CPT_Own_percent_rank'] = portfolio.iloc[:, 0].map(maps_dict['cpt_own_map']).rank(pct=True)
            portfolio['FLEX1_Own_percent_rank'] = flex_ranks[0]
            portfolio['FLEX2_Own_percent_rank'] = flex_ranks[1]
            portfolio['FLEX3_Own_percent_rank'] = flex_ranks[2]
            portfolio['FLEX4_Own_percent_rank'] = flex_ranks[3]
            portfolio['FLEX5_Own_percent_rank'] = flex_ranks[4]
            portfolio['CPT_Own'] = portfolio.iloc[:, 0].map(maps_dict['cpt_own_map']).astype('float32') / 100
            portfolio['FLEX1_Own'] = portfolio.iloc[:, 1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX2_Own'] = portfolio.iloc[:, 2].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX3_Own'] = portfolio.iloc[:, 3].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX4_Own'] = portfolio.iloc[:, 4].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['FLEX5_Own'] = portfolio.iloc[:, 5].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['own_product'] = portfolio[own_columns].product(axis=1) * max(Contest_Size / 10000, 1)
            portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
            portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
            portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)
            # Calculate dupes formula
            portfolio['dupes_calc'] = ((portfolio['own_product'] * 10) * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 50) - ((max_salary - portfolio['salary']) / 50)
            portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (90 + (Contest_Size / 1000)))
            # Round and handle negative values
            portfolio['Dupes'] = np.where(
                portfolio['salary'] == max_salary,
                portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
                portfolio['dupes_calc']
            )
            portfolio['Dupes'] = np.where(
                np.round(portfolio['Dupes'], 0) <= 0,
                0,
                np.round(portfolio['Dupes'], 0) - 1
            )
        elif sport_var == 'LOL':  # elif keeps CS2 portfolios from also falling into the generic branch below
            dup_count_columns = ['CPT_Own_percent_rank', 'TOP_Own_percent_rank', 'JNG_Own_percent_rank', 'MID_Own_percent_rank', 'ADC_Own_percent_rank', 'SUP_Own_percent_rank', 'Team_Own_percent_rank']
            own_columns = ['CPT_Own', 'TOP_Own', 'JNG_Own', 'MID_Own', 'ADC_Own', 'SUP_Own', 'Team_Own']
            calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']
            # Get the original player columns (first 7 columns excluding salary, median, Own)
            player_columns = [col for col in portfolio.columns[:7] if col not in ['salary', 'median', 'Own']]
            n_rows = len(portfolio)
            flex_ranks = calculate_flex_ranks_efficient(portfolio, 1, 7, maps_dict)
            # Assign ranks back to individual columns using the same rank scale
            portfolio['CPT_Own_percent_rank'] = portfolio.iloc[:, 0].map(maps_dict['cpt_own_map']).rank(pct=True)
            portfolio['TOP_Own_percent_rank'] = flex_ranks[0]
            portfolio['JNG_Own_percent_rank'] = flex_ranks[1]
            portfolio['MID_Own_percent_rank'] = flex_ranks[2]
            portfolio['ADC_Own_percent_rank'] = flex_ranks[3]
            portfolio['SUP_Own_percent_rank'] = flex_ranks[4]
            portfolio['Team_Own_percent_rank'] = flex_ranks[5]
            portfolio['CPT_Own'] = portfolio.iloc[:, 0].map(maps_dict['cpt_own_map']).astype('float32') / 100
            portfolio['TOP_Own'] = portfolio.iloc[:, 1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['JNG_Own'] = portfolio.iloc[:, 2].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['MID_Own'] = portfolio.iloc[:, 3].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['ADC_Own'] = portfolio.iloc[:, 4].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['SUP_Own'] = portfolio.iloc[:, 5].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['Team_Own'] = portfolio.iloc[:, 6].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['own_product'] = portfolio[own_columns].product(axis=1) * max(Contest_Size / 10000, 1)
            portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
            portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
            portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)
            # Calculate dupes formula
            portfolio['dupes_calc'] = ((portfolio['own_product'] * 10) * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 50) - ((max_salary - portfolio['salary']) / 50)
            portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (90 + (Contest_Size / 1000)))
            # Round and handle negative values
            portfolio['Dupes'] = np.where(
                portfolio['salary'] == max_salary,
                portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
                portfolio['dupes_calc']
            )
            portfolio['Dupes'] = np.where(
                np.round(portfolio['Dupes'], 0) <= 0,
                0,
                np.round(portfolio['Dupes'], 0) - 1
            )
        elif sport_var == 'GOLF':
            num_players = len([col for col in portfolio.columns if col not in ['salary', 'median', 'Own']])
            dup_count_columns = [f'player_{i}_percent_rank' for i in range(1, num_players + 1)]
            own_columns = [f'player_{i}_own' for i in range(1, num_players + 1)]
            calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']
            # Get the original player columns (first num_players columns excluding salary, median, Own)
            player_columns = [col for col in portfolio.columns[:num_players] if col not in ['salary', 'median', 'Own']]
            for i in range(1, num_players + 1):
                portfolio[f'player_{i}_percent_rank'] = portfolio.iloc[:, i - 1].map(maps_dict['own_percent_rank'])
                portfolio[f'player_{i}_own'] = portfolio.iloc[:, i - 1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['own_product'] = portfolio[own_columns].product(axis=1) * max(Contest_Size / 10000, 1)
            portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
            portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
            portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)
            portfolio['dupes_calc'] = (portfolio['own_product'] * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 100) - ((max_salary - portfolio['salary']) / 100)
            portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (90 + (Contest_Size / 1000)))
            # Round and handle negative values
            portfolio['Dupes'] = np.where(
                portfolio['salary'] == max_salary,
                portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
                portfolio['dupes_calc']
            )
            portfolio['Dupes'] = np.where(
                np.round(portfolio['Dupes'], 0) <= 0,
                0,
                np.round(portfolio['Dupes'], 0) - 1
            )
        else:
            num_players = len([col for col in portfolio.columns if col not in ['salary', 'median', 'Own']])
            dup_count_columns = [f'player_{i}_percent_rank' for i in range(1, num_players + 1)]
            own_columns = [f'player_{i}_own' for i in range(1, num_players + 1)]
            calc_columns = ['own_product', 'own_average', 'own_sum', 'avg_own_rank', 'dupes_calc', 'low_own_count', 'Ref_Proj', 'Max_Proj', 'Min_Proj', 'Avg_Ref', 'own_ratio']
            # Get the original player columns (first num_players columns excluding salary, median, Own)
            player_columns = [col for col in portfolio.columns[:num_players] if col not in ['salary', 'median', 'Own']]
            for i in range(1, num_players + 1):
                portfolio[f'player_{i}_percent_rank'] = portfolio.iloc[:, i - 1].map(maps_dict['own_percent_rank'])
                portfolio[f'player_{i}_own'] = portfolio.iloc[:, i - 1].map(maps_dict['own_map']).astype('float32') / 100
            portfolio['own_product'] = portfolio[own_columns].product(axis=1)
            portfolio['own_average'] = (portfolio['Own'].max() * .33) / 100
            portfolio['own_sum'] = portfolio[own_columns].sum(axis=1)
            portfolio['avg_own_rank'] = portfolio[dup_count_columns].mean(axis=1)
            portfolio['dupes_calc'] = (portfolio['own_product'] * portfolio['avg_own_rank']) * Contest_Size + ((portfolio['salary'] - (max_salary - portfolio['Own'])) / 100) - ((max_salary - portfolio['salary']) / 100)
            portfolio['dupes_calc'] = portfolio['dupes_calc'] * dupes_multiplier * (portfolio['Own'] / (90 + (Contest_Size / 1000)))
            # Round and handle negative values
            portfolio['Dupes'] = np.where(
                portfolio['salary'] == max_salary,
                portfolio['dupes_calc'] + (portfolio['dupes_calc'] * .10),
                portfolio['dupes_calc']
            )
            portfolio['Dupes'] = np.where(
                np.round(portfolio['Dupes'], 0) <= 0,
                0,
                np.round(portfolio['Dupes'], 0) - 1
            )
    portfolio['Dupes'] = np.round(portfolio['Dupes'], 0)
    portfolio['own_ratio'] = np.where(
        portfolio[own_columns].isin([max_ownership]).any(axis=1),
        portfolio['own_sum'] / portfolio['own_average'],
        (portfolio['own_sum'] - max_ownership) / portfolio['own_average']
    )
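    # Worked example (hypothetical numbers): with max_ownership = 0.30 and
    # own_average = 0.198, a lineup holding the field's most-owned player at
    # own_sum = 0.75 rates 0.75 / 0.198 ~= 3.79, while a lineup without that
    # player and the same own_sum rates (0.75 - 0.30) / 0.198 ~= 2.27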
    percentile_cut_scalar = portfolio['median'].max()
    if type_var == 'Classic':
        own_ratio_nerf = 2 if sport_var in ('CS2', 'LOL') else 1.5
    elif type_var == 'Showdown':
        own_ratio_nerf = 1.5
    # Floor the raw percentile at .0005; otherwise halve it
    raw_percentile = (portfolio['own_ratio'] - own_ratio_nerf) / ((5 * (portfolio['median'] / percentile_cut_scalar)) / 3)
    portfolio['Finish_percentile'] = np.where(raw_percentile < .0005, .0005, raw_percentile / 2)
    portfolio['Ref_Proj'] = portfolio['median'].max()
    portfolio['Max_Proj'] = portfolio['Ref_Proj'] + 10
    portfolio['Min_Proj'] = portfolio['Ref_Proj'] - 10
    portfolio['Avg_Ref'] = (portfolio['Max_Proj'] + portfolio['Min_Proj']) / 2
    portfolio['Win%'] = (((portfolio['median'] / portfolio['Avg_Ref']) - (0.1 + ((portfolio['Ref_Proj'] - portfolio['median']) / 100))) / (Contest_Size / 1000)) / 10
    max_allowed_win = (1 / Contest_Size) * 5
    portfolio['Win%'] = portfolio['Win%'] / portfolio['Win%'].max() * max_allowed_win
    portfolio['Finish_percentile'] = portfolio['Finish_percentile'] + .005 + (.005 * (Contest_Size / 10000))
    portfolio['Finish_percentile'] = portfolio['Finish_percentile'] * percentile_multiplier * (portfolio['Own'] / (100 + (Contest_Size / 1000)))
    portfolio['Win%'] = portfolio['Win%'] * (1 - portfolio['Finish_percentile'])
    portfolio['Win%'] = portfolio['Win%'].clip(lower=0, upper=max_allowed_win)
    # Count sub-10% owned players per lineup and soften Finish_percentile for
    # lineups carrying low-owned leverage
    portfolio['low_own_count'] = (portfolio[own_columns] < 0.10).sum(axis=1)
    portfolio['Finish_percentile'] = np.where(
        portfolio['low_own_count'] <= 0,
        portfolio['Finish_percentile'],
        portfolio['Finish_percentile'] / portfolio['low_own_count']
    )
    portfolio['Lineup Edge'] = portfolio['Win%'] * ((.5 - portfolio['Finish_percentile']) * (Contest_Size / 2.5))
    # portfolio['Lineup Edge'] = portfolio.apply(lambda row: row['Lineup Edge'] / (row['Dupes'] + 1) if row['Dupes'] > 0 else row['Lineup Edge'], axis=1)
    portfolio['Lineup Edge'] = (portfolio['Lineup Edge'] - portfolio['Lineup Edge'].mean()) - ((portfolio['Dupes'] - portfolio['Dupes'].mean()) / 50)
    # Rescale edges onto a symmetric [-max_edge, +max_edge] band, guarding
    # against a zero range when every lineup carries an identical edge
    max_edge = portfolio['Lineup Edge'].max()
    min_edge = portfolio['Lineup Edge'].min()
    if max_edge > min_edge:
        portfolio['Lineup Edge'] = 2 * max_edge * (portfolio['Lineup Edge'] - min_edge) / (max_edge - min_edge) - max_edge
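    # Worked example (hypothetical numbers): raw edges of [-1.0, 2.0, 5.0] give
    # max_edge = 5, and the rescale maps min -> -5, midpoint -> 0, max -> +5,
    # i.e. [-5.0, 0.0, 5.0]: symmetric around zero with the top edge's
    # magnitude preserved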
    # Weighted ownership for every row at once via the vectorized helper above
    portfolio['Weighted Own'] = calculate_weighted_ownership_vectorized(portfolio[own_columns].values)
    portfolio['Geomean'] = np.power((portfolio[own_columns] * 100).product(axis=1), 1 / len(own_columns))
    # Calculate similarity score based on actual player selection
    portfolio['Diversity'] = calculate_player_similarity_score_chunked(portfolio, player_columns)
    # check_portfolio = portfolio.copy()
    # Drop the intermediate columns used only for the calculations above
    portfolio = portfolio.drop(columns=dup_count_columns)
    portfolio = portfolio.drop(columns=own_columns)
    portfolio = portfolio.drop(columns=calc_columns)
    # Best-effort downcasts: some columns (e.g. 'Size') only exist for certain
    # portfolio shapes, so a failed cast is skipped rather than raised
    uint16_columns_stacks = ['Dupes', 'Size', 'salary']
    uint16_columns_nstacks = ['Dupes', 'salary']
    float32_columns = ['median', 'Own', 'Finish_percentile', 'Win%', 'Lineup Edge', 'Weighted Own', 'Geomean', 'Diversity']
    try:
        portfolio[uint16_columns_stacks] = portfolio[uint16_columns_stacks].astype('uint16')
    except Exception:
        pass
    try:
        portfolio[uint16_columns_nstacks] = portfolio[uint16_columns_nstacks].astype('uint16')
    except Exception:
        pass
    if sport_var != 'LOL':
        try:
            portfolio[float32_columns] = portfolio[float32_columns].astype('float32')
        except Exception:
            pass
    return portfolio
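

if __name__ == '__main__':
    # Minimal synthetic smoke test, not taken from the real app. The column
    # layout (player slots first, then salary/median/Own) and the maps_dict
    # keys ('own_map', 'own_percent_rank') are assumptions inferred from the
    # generic Classic branch above; all values are hypothetical.
    demo_portfolio = pd.DataFrame({
        'P1': ['a', 'b', 'c'],
        'P2': ['b', 'c', 'd'],
        'P3': ['c', 'd', 'e'],
        'salary': [49800, 49500, 50000],
        'median': [120.0, 115.0, 110.0],
        'Own': [60.0, 45.0, 30.0],
    })
    demo_maps = {
        'own_map': {'a': 30.0, 'b': 25.0, 'c': 20.0, 'd': 10.0, 'e': 5.0},
        'own_percent_rank': {'a': 1.0, 'b': 0.8, 'c': 0.6, 'd': 0.4, 'e': 0.2},
    }
    result = predict_dupes(demo_portfolio, demo_maps, site_var='dk',
                           type_var='Classic', Contest_Size=10000,
                           strength_var='Average', sport_var='MLB',
                           max_salary=50000)
    print(result[['Dupes', 'Win%', 'Finish_percentile', 'Lineup Edge']])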