| prompt | completion | api |
| --- | --- | --- |
| string (lengths 19 to 1.03M) | string (lengths 4 to 2.12k) | string (lengths 8 to 90) |
"""Technical analysis on a trading Pandas DataFrame"""
from numpy import floor
from re import compile
from numpy import maximum, mean, minimum, nan, ndarray, round
from numpy import sum as np_sum
from numpy import where
from pandas import DataFrame, Series
from statsmodels.tsa.statespace.sarimax import SARIMAX
class TechnicalAnalysis():
def __init__(self, data= | DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.stats import mode
from sklearn.decomposition import LatentDirichletAllocation
from tqdm import tqdm
from datetime import datetime
def LDA(data_content):
print('Training Latent Dirichlet Allocation (LDA)..', flush=True)
lda = LatentDirichletAllocation(n_components=data_content.number_of_topics,
learning_decay=data_content.learning_decay,
learning_offset=data_content.learning_offset,
batch_size=data_content.batch_size,
evaluate_every=data_content.evaluate_every,
random_state=data_content.random_state,
max_iter=data_content.max_iter).fit(data_content.X)
print('Latent Dirichlet Allocation (LDA) trained successfully...\n', flush=True)
return lda
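# A minimal, hedged usage sketch for LDA() above, not part of the original
# pipeline: the attribute names below are assumptions that simply mirror what
# LDA() reads from its argument, and the random matrix stands in for the real
# document-term counts held in data_content.X.
def _example_lda_usage():
    from types import SimpleNamespace
    toy_data_content = SimpleNamespace(
        number_of_topics=10, learning_decay=0.7, learning_offset=10.0,
        batch_size=128, evaluate_every=-1, random_state=0, max_iter=10,
        X=np.random.randint(0, 5, size=(100, 50)))  # toy document-term counts
    return LDA(toy_data_content)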
def get_tour_collection(fb, cdf, typ_event):
tour_collection = {}
pbar = tqdm(total=fb.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 1 of 3')
for idx, _ in fb.iterrows():
bik = fb.loc[idx, 'friends']
cell = [-1, -1, -1, -1,
-1, -1, -1, -1]
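# cell layout: the first len(typ_event) slots collect the friends' tour lists
# per event type, the last len(typ_event) slots collect the biker's own lists;
# -1 marks a slot for which no data was found.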
# Looking for friends
if len(bik) != 0:
bik = bik.split()
c = cdf[cdf['biker_id'].isin(bik)]
if c.shape[0] != 0:
for i, te in enumerate(typ_event):
ce = (' '.join(c[te].tolist())).split()
if len(ce) != 0:
cell[i] = ce
# Looking for personal
bik = fb.loc[idx, 'biker_id']
c = cdf[cdf['biker_id'] == bik]
if c.shape[0] != 0:
for i, te in enumerate(typ_event):
ce = c[te].tolist()[0].split()
if len(ce) != 0:
cell[len(typ_event) + i] = ce
tour_collection[fb.loc[idx, 'biker_id']] = cell
pbar.update(1)
pbar.close()
return tour_collection
def find_interest_group(temp_df, data_content):
if temp_df.shape[0] == 0:
return np.zeros((1, data_content.number_of_topics))
pred = data_content.lda.transform(temp_df[data_content.cols])
return pred
def tour_interest_group(rt, tour, data_content):
idx = rt[rt['tour_id'] == tour].index
h = data_content.lda.transform(rt.loc[idx, data_content.cols])
return h
def predict_preference(dataframe, data_content, typ_event=None):
if typ_event is None:
typ_event = ['going', 'not_going', 'maybe', 'invited']
bikers = dataframe['biker_id'].drop_duplicates().tolist()
fb = data_content.bikers_network_df[data_content.bikers_network_df['biker_id'].isin(bikers)]
all_biker_friends = bikers.copy()
for idx, _ in fb.iterrows():
bik = fb.loc[idx, 'friends']
if len(bik) != 0:
all_biker_friends += bik.split()
cdf = data_content.convoy_df[data_content.convoy_df['biker_id'].isin(all_biker_friends)]
tdf = []
for te in typ_event:
tdf += (' '.join(cdf[te].tolist())).split()
temp_df = data_content.tours_df[data_content.tours_df['tour_id'].isin(tdf)]
tour_collection = get_tour_collection(fb, cdf, typ_event)
rt = data_content.tours_df[data_content.tours_df['tour_id'].isin(dataframe['tour_id'].drop_duplicates().tolist())]
for te in typ_event:
dataframe['fscore_' + te] = 0
dataframe['pscore_' + te] = 0
pbar = tqdm(total=len(bikers), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 2 of 3')
for biker in bikers:
sdf = dataframe[dataframe['biker_id'] == biker]
sub = tour_collection[biker]
for i, te in enumerate(typ_event):
frds_tur = sub[i]
pers_tur = sub[len(typ_event) + i]
ft, pt = False, False
if type(frds_tur) != int:
kdf = temp_df[temp_df['tour_id'].isin(frds_tur)]
frds_lat = find_interest_group(kdf, data_content)
ft = True
if type(pers_tur) != int:
udf = temp_df[temp_df['tour_id'].isin(pers_tur)]
pers_lat = find_interest_group(udf, data_content)
pt = True
for idx, _ in sdf.iterrows():
tour = sdf.loc[idx, 'tour_id']
mat = tour_interest_group(rt, tour, data_content)
if ft:
# noinspection PyUnboundLocalVariable
dataframe.loc[idx, 'fscore_' + te] = np.median(np.dot(frds_lat, mat.T).ravel())
if pt:
# noinspection PyUnboundLocalVariable
dataframe.loc[idx, 'pscore_' + te] = np.median(np.dot(pers_lat, mat.T).ravel())
pbar.update(1)
pbar.close()
return dataframe
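# Note on the columns added above: fscore_* / pscore_* hold, respectively, the
# median topic-affinity (dot product of LDA topic vectors) between the candidate
# tour and the tours that the biker's friends (fscore) or the biker personally
# (pscore) responded to with the given event type.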
def get_organizers(dataframe, data_content):
bikers = dataframe['biker_id'].drop_duplicates().tolist()
fb = data_content.bikers_network_df[data_content.bikers_network_df['biker_id'].isin(bikers)]
rt = data_content.tours_df[data_content.tours_df['tour_id'].isin(
dataframe['tour_id'].drop_duplicates().tolist())]
tc = data_content.tour_convoy_df[data_content.tour_convoy_df['tour_id'].isin(
dataframe['tour_id'].drop_duplicates().tolist())]
lis = ['going', 'not_going', 'maybe', 'invited']
dataframe['org_frd'] = 0
dataframe['frd_going'] = 0
dataframe['frd_not_going'] = 0
dataframe['frd_maybe'] = 0
dataframe['frd_invited'] = 0
pbar = tqdm(total=len(bikers), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 3 of 3')
for biker in bikers:
tmp = dataframe[dataframe['biker_id'] == biker]
frd = fb[fb['biker_id'] == biker]['friends'].tolist()[0].split()
for idx, _ in tmp.iterrows():
trs = tc[tc['tour_id'] == tmp.loc[idx, 'tour_id']]
org = rt[rt['tour_id'] == tmp.loc[idx, 'tour_id']]['biker_id'].tolist()[0]
if org in frd:
dataframe.loc[idx, 'org_frd'] = 1
if trs.shape[0] > 0:
for l in lis:
t = trs[l].tolist()[0]
if not pd.isna(t):
t = t.split()
dataframe.loc[idx, 'frd_' + l] = len(set(t).intersection(frd))
pbar.update(1)
pbar.close()
return dataframe
def set_preference_score(dataframe, data_content):
if data_content.preference_feat:
dataframe = predict_preference(dataframe, data_content, typ_event=['going', 'not_going'])
else:
print('Skipping Step 1 & 2...Not required due to reduced noise...', flush=True)
dataframe = get_organizers(dataframe, data_content)
print('Preferences extracted...\n', flush=True)
return dataframe
def calculate_distance(x1, y1, x2, y2):
if np.isnan(x1):
return 0
else:
R = 6373.0
x1, y1 = np.radians(x1), np.radians(y1)
x2, y2 = np.radians(x2), np.radians(y2)
dlon = x2 - x1
dlat = y2 - y1
a = np.sin(dlat / 2) ** 2 + np.cos(x1) * np.cos(x2) * np.sin(dlon / 2) ** 2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
return R * c
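# Hedged usage sketch (not in the original code): inputs are degree coordinates
# passed as (x1, y1, x2, y2) and the result is in kilometres (R = 6373 km); the
# coordinate values below are illustrative only.
_example_distance_km = calculate_distance(48.8566, 2.3522, 51.5074, -0.1278)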
def append_latent_factors(df, data_content):
cam = ['w' + str(i) for i in range(1, 101)] + ['w_other']
out = data_content.lda.transform(df[cam])
out[out >= (1 / data_content.number_of_topics)] = 1
out[out < (1 / data_content.number_of_topics)] = 0
for r in range(data_content.number_of_topics):
df['f' + str(r + 1)] = out[:, r]
return df
def transform(df, data_content):
tr_df = | pd.merge(df, data_content.bikers_df, on='biker_id', how='left') | pandas.merge |
#Author: <NAME>. Email: <EMAIL>
#Packaged by: <NAME>. Email: <EMAIL>
#This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
#To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/4.0/ or send a letter to Creative Commons,
#PO Box 1866, Mountain View, CA 94042, USA.
import numpy as np
import pandas as pd
import pickle
import os
import sys
def save_results_raw(args, P_dict, num_instances, w_dict):
"""
Save results from BASS as raw output.
Use this to keep the raw dictionary output for further decoding
Parameters:
args: Argparse object containing general parameters:
P_dict: Probabilities of dictionary to be saved
num_instances: Number of instances of motif
w_dict: the motifs output by BASS in numerical form
"""
save_path = args.Out + args.DataName +'/'+ args.Exp + '_condition_{}'.format(args.Condition)
if not(os.path.exists(save_path)):
os.makedirs(save_path)
outfile = open(save_path + '/BASSresults','wb')
pickle.dump([P_dict, num_instances, w_dict],outfile)
outfile.close()
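# A hedged companion sketch, not part of BASS itself: reading back the raw
# output written above. The helper name is ours; the file layout is the
# [P_dict, num_instances, w_dict] list pickled by save_results_raw.
def load_results_raw(save_path):
    """Load the pickled BASS results written by save_results_raw."""
    with open(save_path + '/BASSresults', 'rb') as infile:
        P_dict, num_instances, w_dict = pickle.load(infile)
    return P_dict, num_instances, w_dict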
def save_results_classnames(args, P_dict, num_instances, w_dict, class_names):
"""
Save results from BASS as a csv file with the class names as motifs
Parameters:
args: Argparse object containing general parameters
P_dict: Probabilities of dictionary to be saved
num_instances: Number of instances of motif
w_dict: the motifs output in numerical form
class_names: the associated names of the bout types
"""
save_path = args.Out + args.DataName +'/' + args.Exp + '_condition_{}'.format(args.Condition)
if not(os.path.exists(save_path)):
os.makedirs(save_path)
motifs = []
for i, w in enumerate(w_dict):
motif = [class_names[a] for a in w]
motifs.append(str(motif))
full_dict = | pd.DataFrame({'Probability':P_dict,'Number of occurences':num_instances,'Sequences':motifs}) | pandas.DataFrame |
import pandas as pd
import numpy as np
from newsapi import NewsApiClient
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
from os import path
from .. import app, db
from .news_formatting import NewsFormatting
"""
This file contains multiple classes and functions
that work together for the news scraping.
@author: Alericcardi
@version: 1.0.0
"""
class NewsScanner:
"""
NewsScanner class.
Called from ajax_news_process() in pages.py;
retrieves headline news and scrapes their
content (when possible).
@author: Alericcardi
@version: 1.0.0
"""
NUM_NEWS_TO_SCRAP = 40
m_scraper = None
m_scraped_news = None
m_report = None
m_source_name = None
m_web_scrape_sources = None
m_num_new_authors = 0
m_num_new_news = 0
m_num_total_news = 0
def __init__(self):
self.m_scraper = Scraper()
self.m_source_name, self.m_web_scrape_sources = get_sources()
def run_scraper(self):
basic_news = self.download_basic_news()
# scrape the content
scraped_news = self.scrap_news(basic_news, self.m_web_scrape_sources)
scraped_news = self.set_data_type(scraped_news)
return self.upload_to_db(scraped_news)
# web scraper
def scrap_news(self, news, web_scrape_sources):
"""
Scrapes the news items and extracts their content.
The scraper is limited to the sources that are passed in, and those
sources need the right tags so that the blocks containing the
content can be found.
:param news: pandas df with the news (without content)
:param web_scrape_sources: the sources with the tags
:return: the input pandas df (news) with the content column added.
"""
report = pd.DataFrame(columns=['id', 'success', 'status'])
contents_scraped = []
return_news = news
for index, the_news in news.iterrows():
content_found = ''
raw_html = self.m_scraper.simple_get(the_news.url)
if raw_html is None:
report.loc[index] = [index, False, 'no html']
else:
html = BeautifulSoup(raw_html, 'html.parser')
body = self.m_scraper.get_body(the_news, html, web_scrape_sources)
if body is None:
report.loc[index] = [index, False, 'no body']
else:
contents = self.m_scraper.get_content(the_news, body, web_scrape_sources)
for content in contents:
if content.text is not None and len(content.text) > 50:
content_found += content.text.strip() + ' <br><br> '
report.loc[index] = [index, True, '']
# if the retrieved content is empty
if content_found == '':
report.loc[index] = [index, False, 'content empty']
contents_scraped.append(content_found)
return_news['content'] = contents_scraped
self.m_scraped_news = return_news[return_news['content'] != '']
self.m_report = self.report_status(report) # report status
return self.m_scraped_news
def download_basic_news(self):
"""
:return:
"""
columns = ['id_source', 'name', 'author', 'title', 'description', 'url', 'urlToImage', 'publishedAt', 'content']
request_number = self.NUM_NEWS_TO_SCRAP
# DATA PREPARATION
# Building and executing the request to newsapi.org
newsapi = NewsApiClient(api_key='a11cabb5333f4ade87a27e20f28bb568')
all_articles = newsapi.get_top_headlines(sources=self.m_source_name,
language='en',
page_size=request_number)
# DATA FORMATTING
data = | pd.DataFrame.from_dict(all_articles) | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from os.path import join, normpath, isfile
from datetime import timedelta
from cbm.utils import config
from cbm.get import parcel_info, time_series
def ndvi(aoi, pid):
path = normpath(join(config.get_value(['paths', 'temp']), aoi, str(pid)))
file_info = normpath(join(path, 'info.json'))
if not isfile(file_info):
parcel_info.by_pid(aoi, pid)
with open(file_info, 'r') as f:
info_data = json.loads(f.read())
crop_name = info_data['cropname'][0]
area = info_data['area'][0]
file_ts = normpath(join(path, 'time_series_s2.csv'))
if not isfile(file_ts):
time_series.by_pid(aoi, pid, 's2')
df = | pd.read_csv(file_ts, index_col=0) | pandas.read_csv |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path to the input CSV file containing the time series data values
--outFile: Path to the output INI configuration file for the time series data values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean using a multiplication-based method, since direct division can produce infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation using a multiplication-based method, since direct division can produce infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(sigmaValue):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(sigmaRangeValue):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame of invalid data values (e.g. inf, NaN).
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe dtype setup for the CSV file generated from the server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": pandas.StringDtype(),
"nandWritesPerStream11": pandas.StringDtype(),
"nandWritesPerStream12": pandas.StringDtype(),
"nandWritesPerStream13": pandas.StringDtype(),
"nandWritesPerStream14": pandas.StringDtype(),
"nandWritesPerStream15": pandas.StringDtype(),
"nandWritesPerStream16": pandas.StringDtype(),
"nandWritesPerStream17": pandas.StringDtype(),
"nandWritesPerStream18": pandas.StringDtype(),
"nandWritesPerStream19": pandas.StringDtype(),
"nandWritesPerStream20": pandas.StringDtype(),
"nandWritesPerStream21": pandas.StringDtype(),
"nandWritesPerStream22": pandas.StringDtype(),
"nandWritesPerStream23": pandas.StringDtype(),
"nandWritesPerStream24": pandas.StringDtype(),
"nandWritesPerStream25": pandas.StringDtype(),
"nandWritesPerStream26": pandas.StringDtype(),
"nandWritesPerStream27": pandas.StringDtype(),
"nandWritesPerStream28": pandas.StringDtype(),
"nandWritesPerStream29": pandas.StringDtype(),
"nandWritesPerStream30": pandas.StringDtype(),
"nandWritesPerStream31": pandas.StringDtype(),
"nandWritesPerStream32": pandas.StringDtype(),
"hostSoftReadSuccess": pandas.StringDtype(),
"xorInvokedCount": pandas.StringDtype(),
"comresets": pandas.StringDtype(),
"syncEscapes": pandas.StringDtype(),
"rErrHost": pandas.StringDtype(),
"rErrDevice": pandas.StringDtype(),
"iCrcs": pandas.StringDtype(),
"linkSpeedDrops": pandas.StringDtype(),
"mrrXtrapageEvents": pandas.StringDtype(),
"mrrToppageEvents": pandas.StringDtype(),
"hostXorSuccessCount": pandas.StringDtype(),
"hostXorFailCount": pandas.StringDtype(),
"nandWritesWithPreReadPerStream1": pandas.StringDtype(),
"nandWritesWithPreReadPerStream2": pandas.StringDtype(),
"nandWritesWithPreReadPerStream3": pandas.StringDtype(),
"nandWritesWithPreReadPerStream4": pandas.StringDtype(),
"nandWritesWithPreReadPerStream5": pandas.StringDtype(),
"nandWritesWithPreReadPerStream6": pandas.StringDtype(),
"nandWritesWithPreReadPerStream7": pandas.StringDtype(),
"nandWritesWithPreReadPerStream8": pandas.StringDtype(),
"nandWritesWithPreReadPerStream9": pandas.StringDtype(),
"nandWritesWithPreReadPerStream10": pandas.StringDtype(),
"nandWritesWithPreReadPerStream11": pandas.StringDtype(),
"nandWritesWithPreReadPerStream12": pandas.StringDtype(),
"nandWritesWithPreReadPerStream13": pandas.StringDtype(),
"nandWritesWithPreReadPerStream14": pandas.StringDtype(),
"nandWritesWithPreReadPerStream15": pandas.StringDtype(),
"nandWritesWithPreReadPerStream16": pandas.StringDtype(),
"nandWritesWithPreReadPerStream17": pandas.StringDtype(),
"nandWritesWithPreReadPerStream18": pandas.StringDtype(),
"nandWritesWithPreReadPerStream19": pandas.StringDtype(),
"nandWritesWithPreReadPerStream20": pandas.StringDtype(),
"nandWritesWithPreReadPerStream21": pandas.StringDtype(),
"nandWritesWithPreReadPerStream22": pandas.StringDtype(),
"nandWritesWithPreReadPerStream23": pandas.StringDtype(),
"nandWritesWithPreReadPerStream24": pandas.StringDtype(),
"nandWritesWithPreReadPerStream25": pandas.StringDtype(),
"nandWritesWithPreReadPerStream26": pandas.StringDtype(),
"nandWritesWithPreReadPerStream27": pandas.StringDtype(),
"nandWritesWithPreReadPerStream28": pandas.StringDtype(),
"nandWritesWithPreReadPerStream29": pandas.StringDtype(),
"nandWritesWithPreReadPerStream30": pandas.StringDtype(),
"nandWritesWithPreReadPerStream31": pandas.StringDtype(),
"nandWritesWithPreReadPerStream32": pandas.StringDtype(),
"dramCorrectables8to1": pandas.StringDtype(),
"driveRecoveryCount": pandas.StringDtype(),
"mprLiteReads": pandas.StringDtype(),
"eccErrOnMprLiteReads": pandas.StringDtype(),
"readForwardingXpPreReadCount": pandas.StringDtype(),
"readForwardingUpPreReadCount": pandas.StringDtype(),
"readForwardingLpPreReadCount": pandas.StringDtype(),
"pweDefectCompensationCredit": pandas.StringDtype(),
"planarXorRebuildFailure": pandas.StringDtype(),
"itgXorRebuildFailure": pandas.StringDtype(),
"planarXorRebuildSuccess": pandas.StringDtype(),
"itgXorRebuildSuccess": pandas.StringDtype(),
"xorLoggingSkippedSIcBand": pandas.StringDtype(),
"xorLoggingSkippedDieOffline": pandas.StringDtype(),
"xorLoggingSkippedDieAbsent": pandas.StringDtype(),
"xorLoggingSkippedBandErased": pandas.StringDtype(),
"xorLoggingSkippedNoEntry": pandas.StringDtype(),
"xorAuditSuccess": pandas.StringDtype(),
"maxSuspendCount": pandas.StringDtype(),
"suspendLimitPerPrgm": pandas.StringDtype(),
"psrCountStats": pandas.StringDtype(),
"readNandBuffCount": pandas.StringDtype(),
"readNandBufferRspErrorCount": pandas.StringDtype(),
"ddpNandWrites": pandas.StringDtype(),
"totalDeallocatedSectorsInCore": pandas.StringDtype(),
"prefetchHostReads": pandas.StringDtype(),
"hostReadtoDSMDCount": pandas.StringDtype(),
"hostWritetoDSMDCount": pandas.StringDtype(),
"snapReads4k": pandas.StringDtype(),
"snapReads8k": pandas.StringDtype(),
"snapReads16k": pandas.StringDtype(),
"xorLoggingTriggered": pandas.StringDtype(),
"xorLoggingAborted": pandas.StringDtype(),
"xorLoggingSkippedHistory": | pandas.StringDtype() | pandas.StringDtype |
import json
import math
import operator
import warnings
import numpy as np
import pandas as pd
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=np.RankWarning)
np.seterr(divide='ignore', invalid='ignore')
def ema(series, period):
values = np.zeros(len(series))
period = 2.0 / (period + 1)
for i, val in enumerate(series):
values[i] = val if i == 0 else period * val + (1 - period) * values[i - 1]
return values
def sma(series, period):
values = np.zeros(len(series))
for i, val in enumerate(series):
series_slice = series[:i + 1][-min(i + 1, period):]
values[i] = sum(series_slice) / min(i + 1, period)
return values
def change(series):
values = np.zeros(len(series))
for i, val in enumerate(series):
values[i] = 0 if i == 0 else val - series[i - 1]
return values
def linreg(series, period, offset):
values = np.zeros(len(series))
for i, val in enumerate(series):
series_slice = series[:i + 1][-min(i + 1, period):]
coefs = np.polyfit([i for i in range(len(series_slice))], series_slice, 1)
slope = coefs[0]
intercept = coefs[1]
values[i] = intercept + slope * (period - 1 - offset)
return values
def cci(series, period):
values = np.zeros(len(series))
for i, val in enumerate(series):
series_slice = series[:i + 1][-min(i + 1, period):]
current_sma = sma(series_slice, period)[-1]
values[i] = (val - current_sma) / (0.015 * sum([abs(x - current_sma) for x in series_slice]) / period)
return values
def ohlc4(close_prices, open_prices, high_prices, low_prices):
values = np.zeros(len(close_prices))
for i, val in enumerate(close_prices):
values[i] = ((close_prices[i] + open_prices[i] + high_prices[i] + low_prices[i]) / 4)
return values
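# Hedged usage sketch on a toy price series (illustrative values, not part of
# the strategy below): sma/ema smooth the series, change() gives bar-to-bar
# deltas, and cci() is a CCI-style oscillator built from the same helpers.
_toy_close = np.array([10.0, 10.5, 10.2, 10.8, 11.1, 10.9])
_toy_sma = sma(_toy_close, 3)
_toy_ema = ema(_toy_close, 3)
_toy_cci = cci(_toy_close, 3)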
# Call the PineScript-ported strategy code every X minutes and make sure the call happens only when there is an update
def apply_strategy():
global ticks, bars, bars_len, open_orders_count, last_time, tape_len, order_size, current_active, active_order
if len(lob.tape) > tape_len and pd.Timedelta(lob.time, unit='ms') - pd.Timedelta(last_time,
unit='ms') >= | pd.Timedelta(period) | pandas.Timedelta |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import datetime
import functools
import warnings
from abc import ABC, abstractmethod
import collections
from collections.abc import Sequence
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
import toolz
from pandas import DataFrame, date_range
from pandas.tseries.holiday import AbstractHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from pytz import UTC
from exchange_calendars import errors
from .calendar_helpers import (
NANOSECONDS_PER_MINUTE,
NP_NAT,
Date,
Minute,
Session,
TradingMinute,
_TradingIndex,
compute_minutes,
next_divider_idx,
one_minute_earlier,
one_minute_later,
parse_date,
parse_session,
parse_timestamp,
parse_trading_minute,
previous_divider_idx,
)
from .utils.memoize import lazyval
from .utils.pandas_utils import days_at_time
GLOBAL_DEFAULT_START = pd.Timestamp.now(tz=UTC).floor("D") - pd.DateOffset(years=20)
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
GLOBAL_DEFAULT_END = pd.Timestamp.now(tz=UTC).floor("D") + pd.DateOffset(years=1)
NANOS_IN_MINUTE = 60000000000
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY)
WEEKENDS = (SATURDAY, SUNDAY)
def selection(arr, start, end):
predicates = []
if start is not None:
predicates.append(start.tz_localize(UTC) <= arr)
if end is not None:
predicates.append(arr < end.tz_localize(UTC))
if not predicates:
return arr
return arr[np.all(predicates, axis=0)]
def _group_times(all_days, times, tz, offset=0):
if times is None:
return None
elements = [
days_at_time(selection(all_days, start, end), time, tz, offset)
for (start, time), (end, _) in toolz.sliding_window(
2, toolz.concatv(times, [(None, None)])
)
]
return elements[0].append(elements[1:])
class deprecate:
"""Decorator for deprecated/renamed ExchangeCalendar methods."""
def __init__(
self,
deprecated_release: str = "3.4",
removal_release: str = "4.0",
alt: str = "",
renamed: bool = True,
prop: bool = False,
):
self.deprecated_release = "release " + deprecated_release
self.removal_release = "release " + removal_release
self.alt = alt
self.renamed = renamed
if renamed:
assert alt, "pass `alt` if renaming"
self.obj_type = "property" if prop else "method"
self.is_method = not prop
def __call__(self, f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
warnings.warn(self._message(f), FutureWarning)
return f(*args, **kwargs)
return wrapped_f
def _message(self, f):
msg = (
f"`{f.__name__}` was deprecated in {self.deprecated_release}"
f" and will be removed in {self.removal_release}."
)
if self.alt:
if self.renamed:
msg += f" The {self.obj_type} has been renamed `{self.alt}`."
if self.is_method:
msg += (
f" NB parameter names may also have changed (see "
f" documentation for `{self.alt}`)."
)
else:
msg += f" Use `{self.alt}`."
return msg
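# Hedged usage sketch for the decorator above (the class and method names are
# illustrative, not part of this module):
#
#     class _Demo:
#         @deprecate(alt="new_method")
#         def old_method(self):
#             ...
#
#     _Demo().old_method()  # emits a FutureWarning built by _message()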
class HolidayCalendar(AbstractHolidayCalendar):
def __init__(self, rules):
super(HolidayCalendar, self).__init__(rules=rules)
class ExchangeCalendar(ABC):
"""Representation of timing information of a single market exchange.
The timing information comprises sessions, open/close times and, for
exchanges that observe an intraday break, break_start/break_end times.
For exchanges that do not observe an intraday break a session
represents a contiguous set of minutes. Where an exchange observes
an intraday break a session represents two contiguous sets of minutes
separated by the intraday break.
Each session has a label that is midnight UTC. It is important to note
that a session label should not be considered a specific point in time,
and that midnight UTC is just being used for convenience.
For each session, we store the open and close time together with, for
those exchanges with breaks, the break start and break end. All times
are defined as UTC.
Parameters
----------
start : default: later of 20 years ago or first supported start date.
First calendar session will be `start`, if `start` is a session, or
first session after `start`.
end : default: earliest of 1 year from 'today' or last supported end date.
Last calendar session will be `end`, if `end` is a session, or last
session before `end`.
side : default: "both" ("left" for 24 hour calendars)
Define which of session open/close and break start/end should
be treated as a trading minute:
"left" - treat session open and break_start as trading minutes,
do not treat session close or break_end as trading minutes.
"right" - treat session close and break_end as trading minutes,
            do not treat session open or break_start as trading minutes.
"both" - treat all of session open, session close, break_start
and break_end as trading minutes.
"neither" - treat none of session open, session close,
break_start or break_end as trading minutes.
Raises
------
ValueError
If `start` is earlier than the earliest supported start date.
If `end` is later than the latest supported end date.
If `start` parses to a later date than `end`.
Notes
-----
Exchange calendars were originally defined for the Zipline package from
Quantopian under the package 'trading_calendars'. Since 2021 they have
been maintained under the 'exchange_calendars' package (a fork of
'trading_calendars') by an active community of contributing users.
Some calendars have defined start and end bounds within which
contributors have endeavoured to ensure the calendar's accuracy and
outside of which the calendar would not be accurate. These bounds
are enforced such that passing `start` or `end` as dates that are
out-of-bounds will raise a ValueError. The bounds of each calendar are
exposed via the `bound_start` and `bound_end` properties.
Many calendars do not have bounds defined (in these cases `bound_start`
and/or `bound_end` return None). These calendars can be created through
any date range although it should be noted that the earlier the start
date, the greater the potential for inaccuracies.
In all cases, no guarantees are offered as to the accuracy of any
calendar.
Internal method parameters:
_parse: bool
Determines if a `minute` or `session` parameter should be
parsed (default True). Passed as False:
- internally to prevent double parsing.
- by tests for efficiency.
"""
_LEFT_SIDES = ["left", "both"]
_RIGHT_SIDES = ["right", "both"]
def __init__(
self,
start: Date | None = None,
end: Date | None = None,
side: str | None = None,
):
side = side if side is not None else self.default_side()
if side not in self.valid_sides():
raise ValueError(
f"`side` must be in {self.valid_sides()} although received as {side}."
)
self._side = side
if start is None:
start = self.default_start
else:
start = parse_date(start, "start", raise_oob=False)
if self.bound_start is not None and start < self.bound_start:
raise ValueError(self._bound_start_error_msg(start))
if end is None:
end = self.default_end
else:
end = parse_date(end, "end", raise_oob=False)
if self.bound_end is not None and end > self.bound_end:
raise ValueError(self._bound_end_error_msg(end))
if start >= end:
raise ValueError(
"`start` must be earlier than `end` although `start` parsed as"
f" '{start}' and `end` as '{end}'."
)
# Midnight in UTC for each trading day.
_all_days = date_range(start, end, freq=self.day, tz=UTC)
if _all_days.empty:
raise errors.NoSessionsError(calendar_name=self.name, start=start, end=end)
# `DatetimeIndex`s of standard opens/closes for each day.
self._opens = _group_times(
_all_days,
self.open_times,
self.tz,
self.open_offset,
)
self._break_starts = _group_times(
_all_days,
self.break_start_times,
self.tz,
)
self._break_ends = _group_times(
_all_days,
self.break_end_times,
self.tz,
)
self._closes = _group_times(
_all_days,
self.close_times,
self.tz,
self.close_offset,
)
# Apply any special offsets first
self.apply_special_offsets(_all_days, start, end)
# Series mapping sessions with nonstandard opens/closes.
_special_opens = self._calculate_special_opens(start, end)
_special_closes = self._calculate_special_closes(start, end)
# Overwrite the special opens and closes on top of the standard ones.
_overwrite_special_dates(_all_days, self._opens, _special_opens)
_overwrite_special_dates(_all_days, self._closes, _special_closes)
_remove_breaks_for_special_dates(
_all_days,
self._break_starts,
_special_closes,
)
_remove_breaks_for_special_dates(
_all_days,
self._break_ends,
_special_closes,
)
if self._break_starts is None:
break_starts = None
else:
break_starts = self._break_starts.tz_localize(None)
if self._break_ends is None:
break_ends = None
else:
break_ends = self._break_ends.tz_localize(None)
self.schedule = DataFrame(
index=_all_days,
data=collections.OrderedDict(
[
("market_open", self._opens.tz_localize(None)),
("break_start", break_starts),
("break_end", break_ends),
("market_close", self._closes.tz_localize(None)),
]
),
dtype="datetime64[ns]",
)
self.opens_nanos = self.schedule.market_open.values.astype(np.int64)
self.break_starts_nanos = self.schedule.break_start.values.astype(np.int64)
self.break_ends_nanos = self.schedule.break_end.values.astype(np.int64)
self.closes_nanos = self.schedule.market_close.values.astype(np.int64)
_check_breaks_match(self.break_starts_nanos, self.break_ends_nanos)
self._late_opens = _special_opens.index
self._early_closes = _special_closes.index
# Methods and properties that define calendar and which should be
    # overridden or extended, if and as required, by subclass.
@property
@abstractmethod
def name(self) -> str:
raise NotImplementedError()
@property
def bound_start(self) -> pd.Timestamp | None:
"""Earliest date from which calendar can be constructed.
Returns
-------
pd.Timestamp or None
Earliest date from which calendar can be constructed. Must have
tz as "UTC". None if no limit.
Notes
-----
To impose a constraint on the earliest date from which a calendar
can be constructed subclass should override this method and
optionally override `_bound_start_error_msg`.
"""
return None
@property
def bound_end(self) -> pd.Timestamp | None:
"""Latest date to which calendar can be constructed.
Returns
-------
pd.Timestamp or None
Latest date to which calendar can be constructed. Must have tz
as "UTC". None if no limit.
Notes
-----
To impose a constraint on the latest date to which a calendar can
be constructed subclass should override this method and optionally
override `_bound_end_error_msg`.
"""
return None
def _bound_start_error_msg(self, start: pd.Timestamp) -> str:
"""Return error message to handle `start` being out-of-bounds.
See Also
--------
bound_start
"""
return (
f"The earliest date from which calendar {self.name} can be"
f" evaluated is {self.bound_start}, although received `start` as"
f" {start}."
)
def _bound_end_error_msg(self, end: pd.Timestamp) -> str:
"""Return error message to handle `end` being out-of-bounds.
See Also
--------
bound_end
"""
return (
f"The latest date to which calendar {self.name} can be evaluated"
f" is {self.bound_end}, although received `end` as {end}."
)
@property
def default_start(self) -> pd.Timestamp:
if self.bound_start is None:
return GLOBAL_DEFAULT_START
else:
return max(GLOBAL_DEFAULT_START, self.bound_start)
@property
def default_end(self) -> pd.Timestamp:
if self.bound_end is None:
return GLOBAL_DEFAULT_END
else:
return min(GLOBAL_DEFAULT_END, self.bound_end)
@property
@abstractmethod
def tz(self):
raise NotImplementedError()
@property
@abstractmethod
def open_times(self) -> Sequence[tuple[pd.Timestamp | None, datetime.time]]:
"""Local open time(s).
Returns
-------
Sequence[tuple[pd.Timestamp | None, datetime.time]]:
Sequence of tuples representing (start_date, open_time) where:
start_date: date from which `open_time` applies. None for
first item.
open_time: exchange's local open time.
Notes
-----
        Examples of concretely defining `open_times` on a subclass.
Example where open time is constant throughout period covered by
calendar:
open_times = ((None, datetime.time(9)),)
Example where open times have varied over period covered by
calendar:
open_times = (
            (None, datetime.time(9, 30)),
(pd.Timestamp("1978-04-01"), datetime.time(10, 0)),
(pd.Timestamp("1986-04-01"), datetime.time(9, 40)),
(pd.Timestamp("1995-01-01"), datetime.time(9, 30)),
(pd.Timestamp("1998-12-07"), datetime.time(9, 0)),
)
"""
raise NotImplementedError()
@property
def break_start_times(
self,
) -> None | Sequence[tuple[pd.Timestamp | None, datetime.time]]:
"""Local break start time(s).
As `close_times` although times represent the close of the morning
subsession. None if exchange does not observe a break.
"""
return None
@property
def break_end_times(
self,
) -> None | Sequence[tuple[pd.Timestamp | None, datetime.time]]:
"""Local break end time(s).
As `open_times` although times represent the open of the afternoon
subsession. None if exchange does not observe a break.
"""
return None
@property
@abstractmethod
def close_times(self) -> Sequence[tuple[pd.Timestamp | None, datetime.time]]:
"""Local close time(s).
Returns
-------
Sequence[tuple[pd.Timestamp | None, datetime.time]]:
Sequence of tuples representing (start_date, close_time) where:
start_date: date from which `close_time` applies. None for
first item.
close_time: exchange's local close time.
Notes
-----
        Examples of concretely defining `close_times` on a subclass.
Example where close time is constant throughout period covered by
calendar:
            close_times = ((None, datetime.time(17, 30)),)
Example where close times have varied over period covered by
calendar:
close_times = (
(None, datetime.time(17, 30)),
(pd.Timestamp("1986-04-01"), datetime.time(17, 20)),
(pd.Timestamp("1995-01-01"), datetime.time(17, 0)),
(pd.Timestamp("2016-08-01"), datetime.time(17, 30)),
)
"""
raise NotImplementedError()
@property
def weekmask(self) -> str:
"""Indicator of weekdays on which the exchange is open.
Default is '1111100' (i.e. Monday-Friday).
See Also
--------
numpy.busdaycalendar
"""
return "1111100"
@property
def open_offset(self) -> int:
"""Day offset of open time(s) relative to session.
Returns
-------
int
0 if the date components of local open times are as the
corresponding session labels.
-1 if the date components of local open times are the day
before the corresponding session labels.
"""
return 0
@property
def close_offset(self) -> int:
"""Day offset of close time(s) relative to session.
Returns
-------
int
0 if the date components of local close times are as the
corresponding session labels.
1 if the date components of local close times are the day
after the corresponding session labels.
"""
return 0
@property
def regular_holidays(self) -> HolidayCalendar | None:
"""Holiday calendar representing calendar's regular holidays."""
return None
@property
def adhoc_holidays(self) -> list[pd.Timestamp]:
"""List of non-regular holidays.
Returns
-------
list[pd.Timestamp]
List of tz-naive timestamps representing non-regular holidays.
"""
return []
@property
def special_opens(self) -> list[tuple[datetime.time, HolidayCalendar]]:
"""Regular non-standard open times.
Example of what would be defined as a special open:
"EVERY YEAR on national lie-in day the exchange opens
at 13:00 rather than the standard 09:00".
Returns
-------
list[tuple[datetime.time, HolidayCalendar]]:
list of tuples each describing a regular non-standard open
time:
[0] datetime.time: regular non-standard open time.
            [1] HolidayCalendar: holiday calendar describing occurrence.
"""
return []
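    # Illustrative sketch (assumption, not from the original source) of how a
    # subclass might declare a regular special open; `Holiday` would come from
    # pandas.tseries.holiday and the rule below is hypothetical.
    #
    #     special_opens = [
    #         (
    #             datetime.time(13),
    #             HolidayCalendar([Holiday("Late Open Day", month=1, day=2)]),
    #         ),
    #     ]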
@property
def special_opens_adhoc(
self,
) -> list[tuple[datetime.time, pd.Timestamp | list[pd.Timestamp]]]:
"""Adhoc non-standard open times.
Defines non-standard open times that cannot be otherwise codified
        within `special_opens`.
Example of an event to define as an adhoc special open:
"On 2022-02-14 due to a typhoon the exchange opened at 13:00,
rather than the standard 09:00".
Returns
-------
list[tuple[datetime.time, pd.Timestamp | list[pd.Timestamp]]]:
List of tuples each describing an adhoc non-standard open time:
[0] datetime.time: non-standard open time.
[1] pd.Timestamp | list[pd.Timestamp]: date or dates
corresponding with the non-standard open time.
"""
return []
@property
def special_closes(self) -> list[tuple[datetime.time, HolidayCalendar]]:
"""Regular non-standard close times.
Example of what would be defined as a special close:
"On christmas eve the exchange closes at 14:00 rather than
the standard 17:00".
Returns
-------
list[tuple[datetime.time, HolidayCalendar]]:
list of tuples each describing a regular non-standard close
time:
[0] datetime.time: regular non-standard close time.
            [1] HolidayCalendar: holiday calendar describing occurrence.
"""
return []
@property
def special_closes_adhoc(
self,
) -> list[tuple[datetime.time, pd.Timestamp | list[pd.Timestamp]]]:
"""Adhoc non-standard close times.
Defines non-standard close times that cannot be otherwise codified
        within `special_closes`.
Example of an event to define as an adhoc special close:
"On 2022-02-19 due to a typhoon the exchange closed at 12:00,
rather than the standard 16:00".
Returns
-------
list[tuple[datetime.time, pd.Timestamp | list[pd.Timestamp]]]:
List of tuples each describing an adhoc non-standard close
time:
[0] datetime.time: non-standard close time.
[1] pd.Timestamp | list[pd.Timestamp]: date or dates
corresponding with the non-standard close time.
"""
return []
def apply_special_offsets(self, _all_days, start, end) -> None:
"""Hook for subclass to apply changes.
        Method executed by constructor prior to overwriting special dates.
Notes
-----
Incorporated to provide hook to `exchange_calendar_xkrx`.
"""
return None
# ------------------------------------------------------------------
    # -- NO method below this line should be overridden on a subclass! --
# ------------------------------------------------------------------
# Methods and properties that define calendar (continued...).
@lazyval
def day(self):
return CustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
weekmask=self.weekmask,
)
@classmethod
def valid_sides(cls) -> list[str]:
"""List of valid `side` options."""
if cls.close_times == cls.open_times:
return ["left", "right"]
else:
return ["both", "left", "right", "neither"]
@classmethod
def default_side(cls) -> str:
"""Default `side` option."""
if cls.close_times == cls.open_times:
return "right"
else:
return "both"
@property
def side(self) -> str:
"""Side on which sessions are closed.
Returns
-------
str
"left" - Session open and break_start are trading minutes.
Session close and break_end are not trading minutes.
"right" - Session close and break_end are trading minutes,
                Session open and break_start are not trading minutes.
"both" - Session open, session close, break_start and
break_end are all trading minutes.
"neither" - Session open, session close, break_start and
break_end are all not trading minutes.
Notes
-----
Subclasses should NOT override this method.
"""
return self._side
# Properties covering all sessions.
@property
def sessions(self) -> pd.DatetimeIndex:
"""All calendar sessions."""
return self.schedule.index
@functools.lru_cache(maxsize=1)
def _sessions_nanos(self) -> np.ndarray:
return self.sessions.values.astype("int64")
@property
def sessions_nanos(self) -> np.ndarray:
"""All calendar sessions as nano seconds."""
return self._sessions_nanos()
@property
def opens(self) -> pd.Series:
"""Open time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Open time of corresponding session. NB Times are UTC
although dtype is timezone-naive.
"""
return self.schedule.market_open
@property
def closes(self) -> pd.Series:
"""Close time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Close time of corresponding session. NB Times are UTC
although dtype is timezone-naive.
"""
return self.schedule.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Break-start time of corresponding session. NB Times are UTC
although dtype is timezone-naive. Value is missing
(pd.NaT) for any session that does not have a break.
"""
return self.schedule.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Break-end time of corresponding session. NB Times are UTC
although dtype is timezone-naive. Value is missing
(pd.NaT) for any session that does not have a break.
"""
return self.schedule.break_end
@functools.lru_cache(maxsize=1)
def _first_minutes_nanos(self) -> np.ndarray:
if self.side in self._LEFT_SIDES:
return self.opens_nanos
else:
return one_minute_later(self.opens_nanos)
@property
def first_minutes_nanos(self) -> np.ndarray:
return self._first_minutes_nanos()
@functools.lru_cache(maxsize=1)
def _last_minutes_nanos(self) -> np.ndarray:
if self.side in self._RIGHT_SIDES:
return self.closes_nanos
else:
return one_minute_earlier(self.closes_nanos)
@property
def last_minutes_nanos(self) -> np.ndarray:
return self._last_minutes_nanos()
@functools.lru_cache(maxsize=1)
def _last_am_minutes_nanos(self) -> np.ndarray:
if self.side in self._RIGHT_SIDES:
return self.break_starts_nanos
else:
return one_minute_earlier(self.break_starts_nanos)
@property
def last_am_minutes_nanos(self) -> np.ndarray:
return self._last_am_minutes_nanos()
@functools.lru_cache(maxsize=1)
def _first_pm_minutes_nanos(self) -> np.ndarray:
if self.side in self._LEFT_SIDES:
return self.break_ends_nanos
else:
return one_minute_later(self.break_ends_nanos)
@property
def first_pm_minutes_nanos(self) -> np.ndarray:
return self._first_pm_minutes_nanos()
def _minutes_as_series(self, nanos: np.ndarray, name: str) -> pd.Series:
"""Convert trading minute nanos to pd.Series."""
ser = pd.Series(pd.DatetimeIndex(nanos, tz=UTC), index=self.sessions)
ser.name = name
return ser
@property
def first_minutes(self) -> pd.Series:
"""First trading minute of each session."""
return self._minutes_as_series(self.first_minutes_nanos, "first_minutes")
@property
def last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._minutes_as_series(self.last_minutes_nanos, "last_minutes")
@property
def last_am_minutes(self) -> pd.Series:
"""Last am trading minute of each session."""
return self._minutes_as_series(self.last_am_minutes_nanos, "last_am_minutes")
@property
def first_pm_minutes(self) -> pd.Series:
"""First pm trading minute of each session."""
return self._minutes_as_series(self.first_pm_minutes_nanos, "first_pm_minutes")
# Properties covering all minutes.
def _minutes(self, side: str) -> pd.DatetimeIndex:
return pd.DatetimeIndex(
compute_minutes(
self.opens_nanos,
self.break_starts_nanos,
self.break_ends_nanos,
self.closes_nanos,
side,
),
tz=UTC,
)
@lazyval
def minutes(self) -> pd.DatetimeIndex:
"""All trading minutes."""
return self._minutes(self.side)
@lazyval
def minutes_nanos(self) -> np.ndarray:
"""All trading minutes as nanoseconds."""
return self.minutes.values.astype(np.int64)
# Calendar properties.
@property
def first_session(self) -> pd.Timestamp:
"""First calendar session."""
return self.sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last calendar session."""
return self.sessions[-1]
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of calendar's first session."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of calendar's last session."""
return self.closes[-1]
@property
def first_minute(self) -> pd.Timestamp:
"""Calendar's first trading minute."""
return | pd.Timestamp(self.minutes_nanos[0], tz=UTC) | pandas.Timestamp |
import warnings
import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import MultiPoint, Point
def smoothen_triplegs(triplegs, tolerance=1.0, preserve_topology=True):
"""
Reduce number of points while retaining structure of tripleg.
A wrapper function using shapely.simplify():
https://shapely.readthedocs.io/en/stable/manual.html#object.simplify
Parameters
----------
triplegs: GeoDataFrame (as trackintel triplegs)
triplegs to be simplified
tolerance: float, default 1.0
a higher tolerance removes more points; the units of tolerance are the same as the
projection of the input geometry
preserve_topology: bool, default True
whether to preserve topology. If set to False the Douglas-Peucker algorithm is used.
Returns
-------
ret_tpls: GeoDataFrame (as trackintel triplegs)
The simplified triplegs GeoDataFrame
"""
ret_tpls = triplegs.copy()
origin_geom = ret_tpls.geom
simplified_geom = origin_geom.simplify(tolerance, preserve_topology=preserve_topology)
ret_tpls.geom = simplified_geom
return ret_tpls
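# Usage sketch for `smoothen_triplegs` (illustrative: the file name, geometry column
# and tolerance value are assumptions; the tolerance is in the units of the CRS).
#
#     triplegs = gpd.read_file("triplegs.geojson").set_geometry("geom")
#     simplified = smoothen_triplegs(triplegs, tolerance=5.0)
#     # each simplified linestring has at most as many vertices as the original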
def generate_trips(staypoints, triplegs, gap_threshold=15, add_geometry=True):
"""Generate trips based on staypoints and triplegs.
Parameters
----------
staypoints : GeoDataFrame (as trackintel staypoints)
triplegs : GeoDataFrame (as trackintel triplegs)
gap_threshold : float, default 15 (minutes)
Maximum allowed temporal gap size in minutes. If tracking data is missing for more than
`gap_threshold` minutes, then a new trip begins after the gap.
add_geometry : bool default True
If True, the start and end coordinates of each trip are added to the output table in a geometry column "geom"
of type MultiPoint. Set `add_geometry=False` for better runtime performance (if coordinates are not required).
Returns
-------
sp: GeoDataFrame (as trackintel staypoints)
The original staypoints with new columns ``[`trip_id`, `prev_trip_id`, `next_trip_id`]``.
tpls: GeoDataFrame (as trackintel triplegs)
The original triplegs with a new column ``[`trip_id`]``.
trips: (Geo)DataFrame (as trackintel trips)
The generated trips.
Notes
-----
Trips are an aggregation level in transport planning that summarize all movement and all non-essential actions
(e.g., waiting) between two relevant activities.
The function returns altered versions of the input staypoints and triplegs. Staypoints receive the fields
    [`trip_id`, `prev_trip_id`, `next_trip_id`]; triplegs receive the field [`trip_id`].
The following assumptions are implemented
- If we do not record a person for more than `gap_threshold` minutes,
we assume that the person performed an activity in the recording gap and split the trip at the gap.
- Trips that start/end in a recording gap can have an unknown origin/destination
- There are no trips without a (recorded) tripleg
- Trips optionally have their start and end point as geometry of type MultiPoint, if `add_geometry==True`
- If the origin (or destination) staypoint is unknown, and `add_geometry==True`, the origin (and destination)
geometry is set as the first coordinate of the first tripleg (or the last coordinate of the last tripleg),
respectively. Trips with missing values can still be identified via col `origin_staypoint_id`.
Examples
--------
>>> from trackintel.preprocessing.triplegs import generate_trips
>>> staypoints, triplegs, trips = generate_trips(staypoints, triplegs)
trips can also be directly generated using the tripleg accessor
>>> staypoints, triplegs, trips = triplegs.as_triplegs.generate_trips(staypoints)
"""
assert "is_activity" in staypoints.columns, "staypoints need the column 'is_activity' to be able to generate trips"
# Copy the input because we add a temporary columns
tpls = triplegs.copy()
sp = staypoints.copy()
gap_threshold = pd.to_timedelta(gap_threshold, unit="min")
# If the triplegs already have a column "trip_id", we drop it
if "trip_id" in tpls:
tpls.drop(columns="trip_id", inplace=True)
warnings.warn("Deleted existing column 'trip_id' from tpls.")
# if the staypoints already have any of the columns "trip_id", "prev_trip_id", "next_trip_id", we drop them
for col in ["trip_id", "prev_trip_id", "next_trip_id"]:
if col in sp:
sp.drop(columns=col, inplace=True)
warnings.warn(f"Deleted column '{col}' from staypoints.")
tpls["type"] = "tripleg"
sp["type"] = "staypoint"
# create table with relevant information from triplegs and staypoints.
sp_tpls = pd.concat(
[
sp[["started_at", "finished_at", "user_id", "type", "is_activity"]],
tpls[["started_at", "finished_at", "user_id", "type"]],
]
)
if add_geometry:
sp_tpls["geom"] = pd.concat([sp.geometry, tpls.geometry])
# transform nan to bool
sp_tpls["is_activity"].fillna(False, inplace=True)
# create ID field from index
sp_tpls["sp_tpls_id"] = sp_tpls.index
sp_tpls.sort_values(by=["user_id", "started_at"], inplace=True)
# conditions for new trip
# start new trip if the user changes
condition_new_user = sp_tpls["user_id"] != sp_tpls["user_id"].shift(1)
# start new trip if there is a new activity (last activity in group)
_, _, condition_new_activity = _get_activity_masks(sp_tpls)
# gap conditions
# start new trip after a gap, difference of started next with finish of current.
gap = (sp_tpls["started_at"].shift(-1) - sp_tpls["finished_at"]) > gap_threshold
condition_time_gap = gap.shift(1, fill_value=False) # trip starts on next entry
new_trip = condition_new_user | condition_new_activity | condition_time_gap
# assign an incrementing id to all triplegs that start a trip
# temporary as empty trips are not filtered out yet.
sp_tpls.loc[new_trip, "temp_trip_id"] = np.arange(new_trip.sum())
sp_tpls["temp_trip_id"].fillna(method="ffill", inplace=True)
# exclude activities to aggregate trips together.
# activity can be thought of as the same aggregation level as trips.
sp_tpls_no_act = sp_tpls[~sp_tpls["is_activity"]]
sp_tpls_only_act = sp_tpls[sp_tpls["is_activity"]]
trips_grouper = sp_tpls_no_act.groupby("temp_trip_id")
trips = trips_grouper.agg(
{"user_id": "first", "started_at": min, "finished_at": max, "type": list, "sp_tpls_id": list}
)
    def _separate_ids(row):
"""Split aggregated sp_tpls_ids into staypoint ids and tripleg ids columns."""
row_type = np.array(row["type"])
row_id = np.array(row["sp_tpls_id"])
t = row_type == "tripleg"
tpls_ids = row_id[t]
sp_ids = row_id[~t]
# for dropping trips that don't have triplegs
tpls_ids = tpls_ids if len(tpls_ids) > 0 else None
return [sp_ids, tpls_ids]
trips[["sp", "tpls"]] = trips.apply(_seperate_ids, axis=1, result_type="expand")
# drop all trips that don't contain any triplegs
trips.dropna(subset=["tpls"], inplace=True)
    # recount trips ignoring empty trips and save trip_id for id assignment.
trips.reset_index(inplace=True, drop=True)
trips["trip_id"] = trips.index
# add gaps as activities, to simplify id assignment.
gaps = pd.DataFrame(sp_tpls.loc[gap, "user_id"])
gaps["started_at"] = sp_tpls.loc[gap, "finished_at"] + gap_threshold / 2
gaps[["type", "is_activity"]] = ["gap", True] # nicer for debugging
# same for user changes
user_change = pd.DataFrame(sp_tpls.loc[condition_new_user, "user_id"])
user_change["started_at"] = sp_tpls.loc[condition_new_user, "started_at"] - gap_threshold / 2
user_change[["type", "is_activity"]] = ["user_change", True] # nicer for debugging
# merge trips with (filler) activities
trips.drop(columns=["type", "sp_tpls_id"], inplace=True) # make space so no overlap with activity "sp_tpls_id"
    # Inserting `gaps` and `user_change` as filler rows creates buffers that stop shifted
    # "staypoint_id" and "trip_id" values from corrupting neighbouring staypoints/trips.
trips_with_act = | pd.concat((trips, sp_tpls_only_act, gaps, user_change), axis=0, ignore_index=True) | pandas.concat |
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + | tm.get_locales() | pandas.util.testing.get_locales |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ddf_library.context import COMPSsContext
from ddf_library.utils import generate_info
from pycompss.api.task import task
import pandas as pd
import numpy as np
import time
from pandas.testing import assert_frame_equal
@task(returns=2)
def _generate_partition(size, f, dim, max_size):
if max_size is None:
max_size = size * 100
cols = ["col{}".format(c) for c in range(dim)]
df = pd.DataFrame({c: np.random.randint(0, max_size, size=size)
for c in cols})
info = generate_info(df, f)
return df, info
def generate_data(sizes, dim=1, max_size=None):
nfrag = len(sizes)
dfs = [[] for _ in range(nfrag)]
info = [[] for _ in range(nfrag)]
for f, s in enumerate(sizes):
dfs[f], info[f] = _generate_partition(s, f, dim, max_size)
return dfs, info
def check_result(df, size, true, msg, end=False):
cond = df[:size] == true
if not cond:
print(df)
raise Exception('Error in {}'.format(msg))
print('{} - ok'.format(msg))
if end:
log('{} - FINISHED'.format(msg))
def log(msg):
print('-' * 50)
print(msg)
def etl():
cc = COMPSsContext()
cc.start_monitor()
url = ('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/abalone/abalone.data')
cols = ['sex', 'length', 'diam', 'height', 'weight', 'rings']
n_dataset = 20
data = pd.read_csv(url, usecols=[0, 1, 2, 3, 4, 8], names=cols)[:n_dataset]
from ddf_library.columns import col, udf
from ddf_library.types import DataType
def f1(x):
return -42 if x > 0.09 else x
f1_udf = udf(f1, DataType.DECIMAL, col('height'))
data1 = cc.parallelize(data, 4).map(f1_udf, 'height_nan').cache()
def f2(x):
t = '.{}'.format(x)
return t
f2_udf = udf(f2, DataType.STRING, col('sex'))
data2 = data1.map(f2_udf, 'sex')
log("etl_test_1: Multiple caches")
df1 = data1.to_df()
df2 = data2.to_df()
df3 = data1.to_df()
check_result(df1['height_nan'].values.tolist(), 5,
[-42, 0.09, -42, -42, 0.08], 'etl_test_1a')
check_result(df2['sex'].values.tolist(), 5, ['.M', '.M', '.F', '.M', '.I'],
'etl_test_1b')
check_result(df3['sex'].values.tolist(), 5, ['M', 'M', 'F', 'M', 'I'],
'etl_test_1c', True)
log("etl_test_2: Branching: data2 and data3 are data1's children. "
"Note: is this case, those transformations can not be grouped")
data3 = data1.drop(['length', 'diam']).cache()
df1 = data1.to_df()
df2 = data2.to_df()
df3 = data3.to_df()
check_result(df1['sex'].values.tolist(), 5, ['M', 'M', 'F', 'M', 'I'],
'etl_test_2a')
check_result(df2['sex'].values.tolist(), 5, ['.M', '.M', '.F', '.M', '.I'],
'etl_test_2b')
check_result(df3['sex'].values.tolist(), 5, ['M', 'M', 'F', 'M', 'I'],
'etl_test_2c', True)
log("etl_test_3: The operations 'drop', 'drop', and 'replace' "
"must be grouped in a single task")
data4 = data2.drop(['length']).drop(['diam'])\
.replace({15: 42}, subset=['rings']).cache()
df = data4.to_df()
check_result(df['rings'].values.tolist(), 5,
[42, 7, 9, 10, 7], 'etl_test_3')
check_result(df.columns.tolist(), 5,
['sex', 'height', 'weight', 'rings', 'height_nan'],
'etl_test_3', True)
log("etl_test_4: Check if split (and others operations that returns "
"more than one output) is working")
n_total = data4.count_rows()
data5a, data5b = data4.split(0.40)
n1 = data5a.count_rows()
n2 = data5b.count_rows()
if n1 + n2 != n_total:
print('data4:', n_total)
print('data5a:', n1)
print('data5b:', n2)
raise Exception('Error in etl_test_4')
log('etl_test_4 - OK')
log("etl_test_5: Check if operations with multiple inputs are working")
data6 = data5b.join(data5a, ['rings'], ['rings'])\
.filter('(rings > 8)')\
.select(['sex_l', 'height_l', 'weight_l', 'rings']).cache()
df = data6.to_df()
check_result(df.columns.tolist(), 5,
['sex_l', 'height_l', 'weight_l', 'rings'], 'etl_test_5a')
data7 = data6.sample(7).sort(['rings'], [True])
data8 = data6.join(data7, ['rings'], ['rings'])
df = data8.to_df()
v1 = sum(df[['height_l_l', 'weight_l_l']].values.flatten())
v2 = sum(df[['height_l_r', 'weight_l_r']].values.flatten())
cond1 = np.isclose(v1, v2, rtol=0.1)
cols = data8.schema()['columns'].values.tolist()
res = ['sex_l_l', 'height_l_l', 'weight_l_l', 'rings',
'sex_l_r', 'height_l_r', 'weight_l_r']
cond2 = cols == res
if not (cond1 and cond2):
raise Exception('Error in etl_test_5b')
log('etl_test_5b - OK')
log("etl_test_76: Check if 'count_rows' and 'take' are working.")
n = 7
v = data1.select(['rings']).count_rows()
len_df = data1.select(['rings']).take(n).count_rows()
cond = v != n_dataset
if cond:
print(v)
raise Exception('Error in etl_test_6a')
cond = len_df != n
if cond:
print(len_df)
raise Exception('Error in etl_test_6b')
log("etl_test_7b - OK")
import time
time.sleep(10)
cc.context_status()
# cc.show_tasks()
cc.stop()
def add_columns():
print("\n|-------- Add Column --------|\n")
data1 = pd.DataFrame([["N_{}".format(i)] for i in range(5)],
columns=['name'])
data2 = pd.DataFrame([["A_{}".format(i), i + 5] for i in range(5, 15)],
columns=['name', 'b'])
cc = COMPSsContext()
ddf_1a = cc.parallelize(data1, 5)
ddf_1b = cc.parallelize(data2, 10)
df1 = ddf_1a.add_column(ddf_1b).to_df()
res = pd.merge(data1, data2, left_index=True, right_index=True,
suffixes=['_l', '_r'])
assert_frame_equal(df1, res, check_index_type=False)
print("etl_test - add column - OK")
cc.stop()
def aggregation():
print("\n|-------- Aggregation --------|\n")
n = 10
data3 = pd.DataFrame([[i, i + 5, 'hello'] for i in range(n)],
columns=['a', 'b', 'c'])
cc = COMPSsContext()
ddf_1 = cc.parallelize(data3, 4).group_by(['c']).agg(
count_a=('a', 'count'),
first_b=('b', 'first'),
last_b=('b', 'last')
)
df = ddf_1.to_df()
    print(df)
cond1 = len(df) == 1
cond2 = all([f == n for f in df['count_a'].values])
if not (cond1 and cond2):
print(df)
raise Exception('Error in aggregation')
ddf_2 = cc.parallelize(data3, 4).group_by(['a', 'c']).count('*')
df = ddf_2.to_df()
cond1 = len(df) == n
cond2 = all([f == 1 for f in df['count(*)'].values])
if not (cond1 and cond2):
print(df)
raise Exception('Error in aggregation')
ddf3 = cc.parallelize(data3, 4).group_by('c').list('*')
print(ddf3.to_df())
ddf3 = cc.parallelize(data3, 4).group_by(['c']).set('*')
print(ddf3.to_df())
print("etl_test - aggregation - OK")
cc.stop()
def balancer():
print("\n|-------- Balance --------|\n")
iterations = [[10, 0, 10, 5, 100],
# [100, 5, 10, 0, 10],
# [85, 0, 32, 0, 0],
# [0, 0, 0, 30, 100]
]
cc = COMPSsContext()
cc.set_log(True)
for s in iterations:
print('Before:', s)
data, info = generate_data(s)
ddf_1 = cc.import_compss_data(data, schema=info, parquet=False).cache()
df1 = ddf_1.to_df()['col0'].values
ddf_2 = ddf_1.balancer(forced=True) #.cache()
size_a = ddf_2.count_rows(total=False)
df2 = ddf_2.to_df()['col0'].values
print('After:', size_a)
print(np.array_equal(df1, df2))
cc.stop()
def cast():
print("\n|-------- cast --------|\n")
data = pd.DataFrame([[i, i + 5, 0] for i in range(10)],
columns=['a', 'b', 'c'])
cc = COMPSsContext()
ddf_1 = cc.parallelize(data, 4).cast(['a', 'b'], 'string')
schema = ddf_1.schema()
print(schema)
print("etl_test - cast - OK")
cc.stop()
def cross_join():
print("\n|-------- CrossJoin --------|\n")
data1 = pd.DataFrame([["Bob_{}".format(i), i + 5] for i in range(5)],
columns=['name', 'height'])
data2 = pd.DataFrame([[i + 5] for i in range(5, 15)], columns=['gain'])
cc = COMPSsContext()
ddf_1a = cc.parallelize(data1, 4)
ddf_1b = cc.parallelize(data2, 4)
df1 = ddf_1a.cross_join(ddf_1b).to_df().sort_values(by=['name', 'gain'])
print(df1[0:50])
cc.stop()
def distinct():
print("\n|-------- Distinct --------|\n")
data = pd.DataFrame([[i, i + 5, 0] for i in range(10)],
columns=['a', 'b', 'c'])
cc = COMPSsContext()
ddf_1 = cc.parallelize(data, 4).distinct(['c'], opt=True)
df1 = ddf_1.cache().to_df()
print(df1)
res_dist = pd.DataFrame([[0, 5, 0]], columns=['a', 'b', 'c'])
assert_frame_equal(df1, res_dist, check_index_type=False)
print("etl_test - distinct - OK")
cc.stop()
def drop():
print("\n|-------- Drop --------|\n")
data = pd.DataFrame([[i, i + 5, 0] for i in range(10)],
columns=['a', 'b', 'c'])
cc = COMPSsContext()
ddf_1 = cc.parallelize(data, 4).drop(['a'])
df1 = ddf_1.to_df()
res_drop = pd.DataFrame([[5, 0], [6, 0], [7, 0], [8, 0], [9, 0],
[10, 0], [11, 0], [12, 0],
[13, 0], [14, 0]], columns=['b', 'c'])
assert_frame_equal(df1, res_drop, check_index_type=False)
print("etl_test - drop - OK")
cc.stop()
def drop_na():
print("\n|-------- DropNaN --------|\n")
data3 = pd.DataFrame([[i, i + 5, 'hello'] for i in range(5, 15)],
columns=['a', 'b', 'c'])
data3.loc[15, ['b']] = np.nan
data3['d'] = [10, 12, 13, 19, 19, 19, 19, 19, 19, 19, np.nan]
data3['g'] = [10, 12, 13, 19, 19, 19, 19, 19, 19, np.nan, np.nan]
cc = COMPSsContext()
ddf_1 = cc.parallelize(data3, 4)
df1a = ddf_1.dropna(['c'], mode='REMOVE_COLUMN', how='all', thresh=1)
df1b = ddf_1.dropna(['c'], mode='REMOVE_ROW', how='any')
print(df1a.to_df())
print(df1b.to_df())
cc.stop()
def except_all():
print("\n|-------- ExceptAll --------|\n")
cols = ['a', 'b']
s1 = pd.DataFrame([("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3),
("c", 4)], columns=cols)
s2 = pd.DataFrame([("a", 1), ("b", 3), ('e', 4), ('e', 4), ('e', 4),
('e', 6), ('e', 9), ('e', 10), ('e', 4), ('e', 4)],
columns=cols)
cc = COMPSsContext()
ddf_1a = cc.parallelize(s1, 2)
ddf_1b = cc.parallelize(s2, 4)
ddf_2 = ddf_1a.except_all(ddf_1b)
df1 = ddf_2.to_df()
print(df1)
"""
("a", 1),
("a", 1),
("a", 2),
("c", 4)
"""
res = pd.DataFrame([("a", 1), ("a", 1), ("a", 2), ("c", 4)], columns=cols)
assert_frame_equal(df1, res, check_index_type=False)
print("etl_test - exceptAll - OK")
cc.stop()
def explode():
print("\n|-------- Explode --------|\n")
df_size = 1 * 10 ** 3
df = pd.DataFrame(np.random.randint(1, df_size, (df_size, 2)),
columns=list("AB"))
df['C'] = df[['A', 'B']].values.tolist()
col = 'C'
cc = COMPSsContext()
ddf1 = cc.parallelize(df, 4).explode(col)
ddf1.show()
print("etl_test - explode - OK")
cc.stop()
def filter_operation():
print("\n|-------- Filter --------|\n")
data = pd.DataFrame([[i, i + 5] for i in range(10)], columns=['a', 'b'])
cc = COMPSsContext()
ddf_1 = cc.parallelize(data, 4).filter('a > 5')
df1 = ddf_1.to_df()
res_fil = pd.DataFrame([[6, 11], [7, 12],
[8, 13], [9, 14]], columns=['a', 'b'])
assert_frame_equal(df1, res_fil, check_index_type=False)
print("etl_test - filter - OK")
cc.stop()
def fill_na():
print("\n|-------- FillNaN --------|\n")
data3 = pd.DataFrame([[i, i + 5, 'hello'] for i in range(5, 15)],
columns=['a', 'b', 'c'])
data3.loc[15, ['b']] = np.nan
data3.loc[0:2, ['c']] = 'Hi'
data3.loc[6:9, ['c']] = 'world'
data3['d'] = [10, 12, 13, 19, 19, 19, 19, 19, 19, 19, np.nan]
data3['e'] = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, np.nan]
data3['f'] = [10, 10, 10, 10, 10, 10, 10, 10, 10, np.nan, np.nan]
data3['g'] = [10, 12, 13, 19, 19, 19, 19, 19, 19, np.nan, np.nan]
data3['h'] = [10, 12, 13, 19, 5, 5, 5, 5, 5, np.nan, np.nan]
data3['i'] = [5, 12, 13, 19, 19, 19, 5, 5, 5, 5, np.nan]
cc = COMPSsContext()
ddf_1 = cc.parallelize(data3, 4)
# df1a = ddf_1.fillna(mode='VALUE', value=42),
# df1a = ddf_1.fillna(mode='VALUE', value={'c': 42})
# df1a = ddf_1.fillna(['a', 'b'], mode='MEAN')
# df1a = ddf_1.fillna(['c'], mode='MODE')
df1a = ddf_1.fillna(['a', 'b', 'd','e','f','g', 'h', 'i'], mode='MEDIAN')
print(df1a.to_df())
print("A: 9.5 - B: 14.5 - D: 19.0 - E: 10.0 - G: 19.0 - H: 5.0 - I: 8.5")
cc.stop()
def flow_serial_only():
print("\n|-------- Flow to test serial tasks --------|\n")
data = pd.DataFrame([[i, i + 5, 'hello', i + 7] for i in range(1, 15)],
columns=['a', 'b', 'c', 'd'])
from ddf_library.types import DataType
from ddf_library.columns import col, udf
def f3(x):
return 7 if x > 5 else x
cat = udf(f3, DataType.INT, col('a'))
cc = COMPSsContext()
ddf1 = cc.parallelize(data, '*') \
.map(cat, 'e')\
.drop(['c'])\
.select(['a', 'b', 'd'])\
.select(['a', 'b']).to_df()
print(ddf1)
cc.show_tasks()
cc.stop()
def flow_recompute_task():
print("\n|-------- Flow to test task re-computation --------|\n")
cc = COMPSsContext()
# cc.set_log(True)
data = pd.DataFrame([[i, i + 5, 'hello', i + 7] for i in range(1, 25)],
columns=['a', 'b', 'c', 'd'])
ddf1 = cc.parallelize(data, '*')\
.drop(['c']) \
.sample(10)
ddf2 = ddf1.distinct(['a'])\
.select(['a', 'b', 'd'])\
.select(['a', 'b']) \
.select(['a'])\
.sample(5).select(['a'])
ddf2.save.csv('file:///tmp/flow_recompute_task')
ddf3 = ddf1.select(['a', 'b'])
ddf3.show()
cc.context_status()
cc.stop()
# expected result: 1 temporary output (select) and 1 persisted (save)
def hash_partition():
print("\n|-------- Hash partition --------|\n")
n_rows = 1000
data = pd.DataFrame({'a': np.random.randint(0, 100000, size=n_rows),
'b': np.random.randint(0, 100000, size=n_rows),
'c': np.random.randint(0, 100000, size=n_rows)
})
data['b'] = data['b'].astype(str)
cc = COMPSsContext()
ddf_1 = cc.parallelize(data, 12).hash_partition(columns=['a', 'b'],
nfrag=6)
f = ddf_1.num_of_partitions()
c = ddf_1.count_rows(total=False)
print(ddf_1.count_rows(total=False))
print(sum(c) == n_rows)
print(f == 6)
df1 = ddf_1.to_df().sort_values(by=['a', 'b'])
data = data.sort_values(by=['a', 'b'])
assert_frame_equal(df1, data, check_index_type=False)
print("etl_test - hash_partition - OK")
cc.stop()
def import_data():
print("\n|-------- Import data --------|\n")
s1 = pd.DataFrame([("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3),
("c", 4)], columns=['col1', 'col2'])
cc = COMPSsContext()
df1 = cc.import_compss_data(np.array_split(s1, 4)).to_df()
assert_frame_equal(df1, s1, check_index_type=False)
print("etl_test - import data - OK")
cc.stop()
def intersect():
print("\n|-------- Intersect --------|\n")
cols = ['col1', 'col2']
s1 = pd.DataFrame([("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3),
("c", 4)], columns=cols)
s2 = pd.DataFrame([('a', 1), ('a', 1), ('b', 3)], columns=cols)
cc = COMPSsContext()
ddf_1a = cc.parallelize(s1, 4)
ddf_1b = cc.parallelize(s2, 4)
ddf_2 = ddf_1a.intersect(ddf_1b)
df1 = ddf_2.to_df().sort_values(by=cols)
res = | pd.DataFrame([['b', 3], ['a', 1]], columns=cols) | pandas.DataFrame |
import pkg_resources
from unittest.mock import sentinel
import pandas as pd
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def test_calibration_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
class TestOpenAndCombineSensorData:
def test_interpolates_data_correctly(
self, test_calibration_file_path, test_picolog_file_path
):
combined_data = module.open_and_combine_picolog_and_calibration_data(
calibration_log_filepaths=[test_calibration_file_path],
picolog_log_filepaths=[test_picolog_file_path],
).reset_index() # move timestamp index to a column
# calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly
subset_combined_data_to_compare = combined_data[
[
"timestamp",
"equilibration status",
"setpoint temperature (C)",
"PicoLog temperature (C)",
]
]
expected_interpolation = pd.DataFrame(
[
{
"timestamp": "2019-01-01 00:00:00",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39,
},
{
"timestamp": "2019-01-01 00:00:01",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39.5,
},
{
"timestamp": "2019-01-01 00:00:03",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
{
"timestamp": "2019-01-01 00:00:04",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
]
).astype(
subset_combined_data_to_compare.dtypes
) # coerce datatypes to match
pd.testing.assert_frame_equal(
subset_combined_data_to_compare, expected_interpolation
)
class TestGetEquilibrationBoundaries:
@pytest.mark.parametrize(
"input_equilibration_status, expected_boundaries",
[
(
{ # Use full timestamps to show that it works at second resolution
pd.to_datetime("2019-01-01 00:00:00"): "waiting",
pd.to_datetime("2019-01-01 00:00:01"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:02"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:03"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019-01-01 00:00:01"),
"end_time": pd.to_datetime("2019-01-01 00:00:02"),
}
],
),
(
{ # Switch to using only years as the timestamp for terseness and readability
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": | pd.to_datetime("2022") | pandas.to_datetime |
#!/usr/bin/env python
# inst: university of bristol
# auth: <NAME>
# mail: <EMAIL> / <EMAIL>
import os
import shutil
from glob import glob
import zipfile
import numpy as np
import pandas as pd
import gdalutils
from osgeo import osr
def _secs_to_time(df, date1):
df = df.copy()
conversion = 86400 # 86400s = 1day
df['time'] = pd.to_datetime(
df['Time']/conversion, unit='D', origin=pd.Timestamp(date1))
df.set_index(df['time'], inplace=True)
del df['Time']
del df['time']
return df
def _hours_to_time(df, date1):
df = df.copy()
conversion = 24 # 24h = 1day
df['time'] = pd.to_datetime(
df['Time']/conversion, unit='D', origin=pd.Timestamp(date1))
df.set_index(df['time'], inplace=True)
del df['Time']
del df['time']
return df
def _get_lineno(filename, phrase):
with open(filename, 'r') as f:
for num, line in enumerate(f):
if phrase in line:
return num
def read_mass(filename, date1='1990-01-01'):
df = pd.read_csv(filename, delim_whitespace=True)
df = _secs_to_time(df, date1)
df['res'] = np.arange(0, df.index.size)
return df
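# Usage sketch (illustrative; the file name and start date are assumptions):
#
#     mass = read_mass('results/sim.mass', date1='2000-01-01')
#     mass['res'].plot()  # 'res' is the running counter column added by read_mass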
def read_discharge(filename, date1='1990-01-01'):
line = _get_lineno(filename, 'Time') + 1 # inclusive slicing
df = pd.read_csv(filename, skiprows=range(0, line),
header=None, delim_whitespace=True)
df.rename(columns={0: 'Time'}, inplace=True)
df = _secs_to_time(df, date1)
return df
def read_stage(filename, date1='1990-01-01'):
line = _get_lineno(filename, 'Time') + 1 # inclusive slicing
df = pd.read_csv(filename, skiprows=range(0, line),
header=None, delim_whitespace=True)
df.rename(columns={0: 'Time'}, inplace=True)
df = _secs_to_time(df, date1)
return df
def read_stage_locs(filename):
str_line = _get_lineno(filename, 'Stage information') + 1
end_line = _get_lineno(filename, 'Output, depths:') - 1
df = pd.read_csv(filename, header=None, delim_whitespace=True,
skiprows=range(0, str_line), nrows=end_line-str_line,
index_col=0, names=['x', 'y', 'elev'])
return df
def read_bci(filename):
return pd.read_csv(filename, skiprows=1, delim_whitespace=True,
names=['boundary', 'x', 'y', 'type', 'name'])
def read_bdy(filename, bcifile, date1='1990-01-01'):
phrase = 'hours'
bdy = pd.DataFrame()
with open(filename, 'r') as f:
for num, line in enumerate(f):
if phrase in line:
start = num + 1
lines = int(line.split(' ')[0])
total = start + lines
df = pd.read_csv(filename, skiprows=start, nrows=total-start,
header=None, delim_whitespace=True)
bdy = | pd.concat([bdy, df[0]], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 10:04:19 2018
@author: <NAME> and <NAME>
--Collector Probe W Analysis and Mapping--
"""
# set imports
import os
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
# define file directory and file name
os.chdir(r'C:\Users\jduran2\Documents\Python Scripts\Python Test Directory')
file_name = 'AU27_Map_100um.xlsx'
####################################################
# User Inputs
bkg_min = 0
bkg_max = 80
scan_length = 50 # mm
scan_speed_um = 500 # um/sec
####################################################
# import data from excel file into pandas dataframe
df = pd.read_excel(file_name,
                    sheet_name = 1,
header = 0,
skiprows = 3,
index_col = False,
keep_default_na = True
)
# calculate total W signature and add to df
df['Total W'] = df['W180']+df['W182']+df['W183']+df['W184']+df['W186']
# set background range of rows, average background, and subtract from remaining data
#bkg_min = input('Input starting row for background range: ') # user input option
#bkg_max = input('Input ending row for background range: ') # user input option
bkg_min = bkg_min
bkg_max = 1 + bkg_max
bkg_df = df.iloc[bkg_min:bkg_max]
mean_bkg_180 = bkg_df["W180"].mean()
mean_bkg_182 = bkg_df["W182"].mean()
mean_bkg_183 = bkg_df["W183"].mean()
mean_bkg_184 = bkg_df["W184"].mean()
mean_bkg_186 = bkg_df["W186"].mean()
mean_bkg_TotalW = bkg_df["Total W"].mean()
df['W180'] = df['W180'] - mean_bkg_180
df['W182'] = df['W182'] - mean_bkg_182
df['W183'] = df['W183'] - mean_bkg_183
df['W184'] = df['W184'] - mean_bkg_184
df['W186'] = df['W186'] - mean_bkg_186
# re-calculate total W signature and add to df
df['Total W'] = df['W180']+df['W182']+df['W183']+df['W184']+df['W186']
# calculate signal error based on detector counting statistics or root(N) and error propagation
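# The uncertainties computed below follow Poisson counting statistics
# (sigma_N = sqrt(N)) with quadrature error propagation:
#   sigma_total = sqrt(sigma_180^2 + sigma_182^2 + sigma_183^2 + sigma_184^2 + sigma_186^2)
#   sigma_EF    = EF * sqrt((sigma_iso/N_iso)^2 + (sigma_total/N_total)^2)
# where EF = N_iso / N_total is the isotopic enrichment fraction.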
isotopes = ['W180', 'W182', 'W183', 'W184', 'W186']
for iso in isotopes:
    df[iso + ' error'] = np.sqrt(df[iso])
df['Total W error'] = np.sqrt(sum(np.power(df[iso + ' error'], 2) for iso in isotopes))
for iso in isotopes:
    df[iso + ' EF'] = np.divide(df[iso], df['Total W'])
for iso in isotopes:
    df[iso + ' EF error'] = np.multiply(df[iso + ' EF'],
                                        np.sqrt(np.power(np.divide(df[iso + ' error'], df[iso]), 2)
                                                + np.power(np.divide(df['Total W error'], df['Total W']), 2)))
# create a function for removing nan values
#def final_W_err_convert(sizing_data, EF_err):
# final_EF_err = []
# for row in range(0, len(sizing_data)): #start from 0
# if EF_err[row] == 'nan':
# final_EF_err.append(0)
# else:
# final_EF_err.append(EF_err[row])
# return final_EF_err
#
#EF180err = final_W_err_convert(ScanTime, EF180err)
#EF182err = final_W_err_convert(ScanTime, EF182err)
#EF183err = final_W_err_convert(ScanTime, EF183err)
#EF184err = final_W_err_convert(ScanTime, EF184err)
#EF186err = final_W_err_convert(ScanTime, EF186err)
# set scan length and scan speed
#scan_length = 50 # mm
axial_scan_data_length = scan_length*4
#scan_speed_um = 500 # um/sec
scan_speed_mm = scan_speed_um/1000 #mm/sec
# create scan time np.array
ScanTime = np.array([])
ScanTime = np.append(ScanTime,0)
for row in range(1, len(df['Time [Sec]'])-1): #start from 1, to leave out row 0
ScanTime = np.append(ScanTime,ScanTime[row-1] + df['Time [Sec]'][row] - df['Time [Sec]'][row-1])
# create scan distance np.array
ScanDist = np.array([])
for row in range(0, len(ScanTime)): #start from 0
ScanDist = np.append(ScanDist,ScanTime[row]*scan_speed_mm)
Axial_Location = np.array(ScanDist[0:axial_scan_data_length])
# filter data based on 'Total W' column
map_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Script to reformat xlsx tables from Akamai State of the Internet into a single SQL table
#
# <NAME> 2014
# <EMAIL>
# requires pandas, xlrd
import pandas as pd
import sqlite3
def importdata(infile,table,connection):
# Get list of sheets or count of sheets
xfile = | pd.ExcelFile(infile) | pandas.ExcelFile |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 snaketao. All Rights Reserved
#
# @Version : 1.0
# @Author : snaketao
# @Time : 2021-10-21 12:21
# @FileName: insert_mongo.py
# @Desc : insert data to mongodb
import appbk_mongo
import pandas as pd
# Data processing: for each movie, build the list of its top tag ids and insert the documents into the MongoDB 'movies' collection
def function_insert_movies():
file1 = pd.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\movies.csv')
data = []
    for idx in file1.index:
        sett = {}
        a = file1.loc[idx].values[:]
sett['movieid'] = int(a[0])
sett['title'] = a[1]
sett['genres'] = a[2].split('|')
sett['tags'] = []
data.append(sett)
file2 = pd.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\genome-scores.csv')
file3 = pd.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\genome-tags.csv')
print(-1)
file2.sort_values(['movieId','relevance'], ascending=[True,False], inplace=True)
grouped = file2.groupby(['movieId']).head(3)
result = | pd.merge(grouped, file3, how='inner', on='tagId',left_index=False, right_index=False, sort=False,suffixes=('_x', '_y'), copy=True) | pandas.merge |
# -*- coding: utf-8 -*-
""" Simple multi-area model for Nordic electricity market
Created on Wed Jan 16 11:31:07 2019
@author: elisn
Notes:
1 - For conversion between dates (YYYYMMDD:HH) and weeks (YYYY:WW), weeks are counted as starting during the first hour
of a year and lasting 7 days, except for the last week, which covers the remaining hours in the year. All years are thus
assumed to have 52 weeks. This definition does not follow the ISO calendar standard but is legacy from the first version
of the model; changing it would probably not change the results significantly. Note also that the MAF inflow data does
not follow the ISO calendar standard for weeks either, but counts weeks as starting on Sundays.
2 - It is not known whether the ENTSO-E reservoir data corresponds to the reservoir level at the beginning or the end of
the week. This can reduce the accuracy of the model for short time periods but has little effect when simulating a whole
year. A request for clarification has been made to ENTSO-E.
3 - For the exchange GB-NL, February 20-27 2016, the flows and scheduled exchanges are outside the implicitly
allocated day-ahead capacity; the reason for this is unknown.
"""
### EXTERNAL LIBRARIES ###
import time
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pathlib import Path
import datetime
import os
import pyomo.environ as pye
from contextlib import redirect_stdout
##############################################################
######### MODULES FROM POWER_DATABASES #######################
##############################################################
import maf_hydro_data
import maf_pecd_data
import entsoe_transparency_db as entsoe
from help_functions import compact_xaxis_ticks, \
week_to_range, str_to_date, intersection, duration_curve, interp_time, \
interpolate_weekly_values, time_to_bin, err_func, curtailment_statistics
### INTERNAL MODULES ###
from offer_curves import SupplyCurve
from model_definitions import MWtoGW, GWtoMW, cm_per_inch, std_fig_size, area_to_country, country_to_areas, entsoe_type_map, synchronous_areas, colors, \
nordpool_capacities, generators_def, solver_executables, solver_stats, bidz2maf_pecd, co2_price_ets, \
new_trans_cap, GWtoTW, TWtoGW, all_areas
from help_classes import EmptyObject, Error
from week_conversion import WeekDef
class Model:
""" Contains all data processing not related to specific solver api (gurobi/pyomo)
NAMING CONVENTIONS:
df_xxx - dataframe obtained from external database
TIME CONVENTIONS:
For average energy quantities, time stamp marks beginning of the (hourly) interval. This is consistent with
convention in databases, since the beginning of the hour has been used to time stamp hourly data.
starttime - beginning of first period in model
endtime - end of last period in model
timerange - all hours modelled (beginning of hour)
idx_time - index of timerange, used to create time set in optimization model
timerange_p1 - all hours including endtime hour
Note: the data used is retrieved for all hours in timerange plus one extra hour, to allow for interpolation of
the data to higher resolution
"""
def __init__(self,name='default',path='D:/NordicModel/Results',db_path='D:/Data',
data_path='D:/NordicModel/InputData'):
self.name = name
self.data_path = Path(data_path)
self.db_path = Path(db_path)
self.res_path = Path(path) / name
self.fig_path = self.res_path / 'Figures'
self.root_path = self.res_path # points to root directory of this model
self.res_path.mkdir(exist_ok=True,parents=True)
self.fig_path.mkdir(exist_ok=True,parents=True)
self.runs = [] # store results from multiple model runs
self.res_time = {} # store runtime info
def update_path(self,path='D:/NordicModel/Results/case'):
""" Update path where figures and results are stored, without changing root path """
self.res_path = Path(path)
self.fig_path = self.res_path / 'Figures'
self.res_path.mkdir(exist_ok=True)
self.fig_path.mkdir(exist_ok=True)
def default_options(self):
""" Set default options for model """
############# BASIC OPTIONS ##############
self.opt_solver = 'ipopt' # solver to use, must be installed
self.opt_api = 'pyomo' # pyomo/gurobi (gurobi api only works if solver is also gurobi)
self.opt_solver_opts = {} # options to pass to solver (with pyomo api)
self.opt_start = '20180101'
self.opt_end = '20180108'
self.opt_weather_year = 2016 # used to get maf data, inflow data, and solar merra data
self.opt_load_scale = 1 # scale load by this factor
self.opt_loss = 0 # Fraction of energy lost in transmission
self.opt_nonnegative_data = ['inflow']
self.opt_countries = ['SE','DK','NO','FI','EE','LT','LV','PL','DE','NL','GB'] # modelled countries
self.opt_use_maf_pecd = False # use solar and wind data from MAF2020
self.opt_impute_limit = 30 # maximum number of values to interpolate in data
self.opt_impute_constant = { # constants used to impute remaining missing values in input data
'exchange':0, # for external exchanges
'solar':0,
}
self.opt_run_initialization = False # run low resolution model to get values for initialization
self.opt_init_delta = 168
# Note: initialization is useful for some solvers (e.g. ipopt) but may not be for others (e.g. gurobi)
self.opt_db_files = {
'capacity':'capacity.db',
'prices':'prices.db',
'exchange':'exchange.db',
'gen':'gen.db',
'unit':'unit.db',
'load':'load.db',
'reservoir':'reservoir.db',
'inflow':'inflow.db',
'maf_hydro':'maf_hydro.db',
'maf_pecd':'maf_pecd.db',
}
self.opt_err_labl = 'MAE' # should be consistent with the error computed in err_func
########## COST OPTIONS ##########################
self.opt_costfit_tag = '2019' # use this costfit from the input parameters
self.opt_hydro_cost = False # include fitted hydro costs, not properly implemented
self.opt_default_thermal_cost = 40 # default value for thermal cost
self.opt_loadshed_cost = 3000 # cost for demand curtailment
self.opt_nuclear_cost = 7.35 # default value for nuclear cost
self.opt_wind_cost = 1 # low wind cost in EUR/MWh to favour wind curtailment over solar
self.opt_use_var_cost = True # use variable costs
# Source for variable cost data: data['costfit_shifted']['tag']
# replace extreme cost fits (e.g. decreasing or very sharply increasing MC) with a fuel-based constant MC
self.opt_overwrite_bad_costfits = True
self.opt_c2_min = 1e-5
self.opt_c2_max = 0.5
# specify co2 price, this is added to the price coefficient MC(p)=k*p+m+(co2_price-co2_price(offset_year))
self.opt_co2_price = None
self.opt_co2_price_offset_year = 2016 # if set to year, this assumes m already contains the cost for that year
############ TECHNICAL LIMITS #########################
self.opt_capacity_year = 2019 # use generation capacity from entsoe for this year
self.opt_hvdc_max_ramp = 600 # 600 MW/hour
self.opt_pmax_type = 'capacity'
self.opt_pmax_type_hydro = 'stats'
# Options for pmax: 'stats' - from gen_stats.xlsx (production statistics)
# 'capacity' - from entsoe capacity per type database
# For hydro the source for the maximum capacity is chosen separately
self.opt_pmin_zero = False # put pmin = 0
######### NUCLEAR OPTIONS ################
self.opt_nucl_min_lvl = 0.65 # nuclear can ramp down to this level
self.opt_nucl_ramp = None # overwrite nuclear ramp rate (%/hour)
self.opt_nucl_add_cap = {
'SE3':0,
'FI':0,
'DE':0,
} # add this firm capacity to nuclear generation
# option to compute nuclear max levels from individual units for some areas, can be used to deactivate certain
# nuclear reactors in order to simulate scenarios, requires production data for individual units
self.opt_nucl_individual_units = []
# exclude these nuclear reactors when deciding maximum generation levels - only possible with opt_nucl_individual_units
self.opt_nucl_units_exclude = []
#self.opt_nucl_units_exclude = ['Ringhals block 1 G11','Ringhals block 1 G12','Ringhals block 2 G21','Ringhals block 2 G22']
######### HYDRO OPTIONS #################
self.opt_reservoir_offset = 168
self.opt_reservoir_data_normalized = True # use normalized reservoir data
self.opt_default_inflow = 100
self.opt_default_inflow_area = { # GWh/week, per area
'DE':346, # 180 TWh yearly production
'PL':45,
'GB':107,
}
self.opt_use_maf_inflow = False # use MAF inflow data or inflow calculated from ENTSO-E data
# inflow interpolation:
# constant (i.e. constant for one week)
# linear (linear ramp rate between weeks)
self.opt_inflow_interp = 'linear'
self.opt_hydro_daily = False # daily reservoir constraints (instead of hourly)
self.opt_reservoir_start_fill = 0.5 # if reservoir data does not exist, assume default filling value
self.opt_reservoir_end_fill = 0.5
# share of inflow which is run of river, if no data available
self.opt_ror_fraction = {
'SE1':0.13,
'SE2':0.21,
'SE3':0.27,
'SE4':0.3,
'NO1':0.25,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'FI':0.27,
'LV':0.4,
'LT':0.5,
'PL':0.8,
'DE':0.9,
'GB':0.4,
}
self.opt_reservoir_capacity = { # GWh
'NO1':6507,
'NO2':33388,
'NO3':8737,
'NO4':19321,
'NO5':16459,
'SE1':13688,
'SE2':15037,
'SE3':2517,
'SE4':216,
'FI':4512,
'LT':12.2,
'LV':11.2,
'PL':2.3,
'DE':1263,
'GB':26.4,
}
# pumping capacity
self.opt_pump_capacity = { # in MW, from MAF data
'PL':1660,
'DE':7960,
'GB':2680,
'NO1':130,
'NO2':430,
'NO3':70,
'NO5':470,
'LT':720,
}
self.opt_pump_reservoir = { # in GWh
'PL':6.3,
'DE':20,
}
# pumping efficiency
self.opt_pump_efficiency = 0.75
############# RESERVE OPTIONS ################
self.opt_use_reserves = False # include reserve requirements
self.opt_country_reserves = False # reserves by country instead of by area (more flexibility)
self.opt_reserves_fcrn = { # this is the allocation of 600 MW FCR-N
'SE':245,
'NO':215,
'DK':0,
'FI':140,
}
self.opt_reserves_fcrd = 1200 # FCR-D, allocated in same proportion as FCR-N
######## EXTERNAL AREAS OPTIONS #################
# the price will be set for these price areas, and the export/import will be variable instead of fixed
self.opt_set_external_price = ['DE','PL']
self.opt_default_prices = {
'PL':40, # use this price for external connections if no other is available
'RU':40,
'DE':40,
'NL':40,
'GB':40,
}
self.opt_exchange_data_type = 'flow'
########### TRANSFER CAPACITY OPTIONS #####################
self.opt_use_var_exchange_cap = True
self.opt_nominal_capacity_connections = [('NL','GB'),]
# these connections will always use nominal exchange capacity
self.opt_min_exchange_cap = 100 # minimum variable transfer capacity (MW)
# may be set to >= 2018 to include additional future transmission capacity,
# from new_trans_cap in model_definitions
self.opt_exchange_cap_year = None
########## WIND OPTIONS #############
self.opt_wind_scale_factor = {
'SE1':1,
'SE2':1,
'SE3':1,
'SE4':1,
}
self.opt_wind_capacity_onsh = {
'DK1':3725,
'DK2':756,
'EE':329,
'FI':2422,
'LT':540,
'LV':84,
'NO1':166,
'NO2':1145,
'NO3':1090,
'NO4':668,
'NO5':0,
'SE1':1838,
'SE2':3849,
'SE3':2780,
'SE4':1581,
'PL':5952,
'NL':3973,
'DE':53338,
'GB':14282,
}
self.opt_wind_capacity_offsh = {
'DK1':1277,
'DK2':423,
'EE':0,
'FI':0,
'LT':0,
'LV':0,
'NO1':0,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'SE1':0,
'SE2':0,
'SE3':0,
'SE4':0,
'PL':0,
'NL':1709,
'DE':7504,
'GB':10383,
}
########### SOLAR OPTIONS #############
# Note: the solar capacities only apply if opt_use_maf_pecd is True, otherwise ENTSO-E production data is used for solar
# manually specify solar capacity for areas:
self.opt_solar_cap_by_area = {
'DK1':878, # from ENTSO-E
'DK2':422,
'EE':164,
'FI':215,
'LT':169,
'LV':11,
'SE1':9, # from Energiåret 2020 (energiföretagen)
'SE2':67,
'SE3':774,
'SE4':240,
'PL':1310,
'NL':5710,
'DE':48376,
'GB':13563,
}
# if solar capacity for an area is not specified, the aggregated value
# for that country is used, weighted by the area's share of total load
self.opt_solar_cap_by_country = { # from IRENA Capacity Statistics 2020
'DK':1079,
'FI':215,
'NO':90,
'SE':644,
'LV':3,
'LT':103,
'EE':107,}
########## INERTIA OPTIONS ####################
self.opt_use_inertia_constr = False # inertia constraints
self.opt_min_kinetic_energy = 113 # GWs
# calculation of kinetic energy: Ek = H*P/(cf*pf)
# inertia constants from Persson (2017) Kinetic Energy Estimation in the Nordic System
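# Worked example with illustrative numbers (not from this model): for SE hydro with
# H = 4.5 s, P = 16 GW, cf = 0.8 and pf = 0.9, Ek = 4.5*16/(0.8*0.9) = 100 GWs, which
# is the kind of value compared against opt_min_kinetic_energy when the inertia
# constraint is enabled.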
self.opt_inertia_constants = {
'SE':{'Hydro':4.5,'Thermal':2.9,'Nuclear':6.2,},
'NO':{'Hydro':2.9,'Thermal':2.5,},
'FI':{'Hydro':2.8,'Thermal':4.4,'Nuclear':6.6,},
'DK':{'Thermal':4.5,},
}
# assumption about power factor pf
self.opt_inertia_pf = {
'SE':{'Hydro':0.9,'Thermal':0.9,'Nuclear':0.9,},
'NO':{'Hydro':0.9,'Thermal':0.9,},
'FI':{'Hydro':0.9,'Thermal':0.9,'Nuclear':0.9,},
'DK':{'Thermal':0.9,},
}
# assumption about capacity factor cf
self.opt_inertia_cf = {
'SE':{'Hydro':0.8,'Thermal':1,'Nuclear':1,},
'NO':{'Hydro':0.8,'Thermal':1,},
'FI':{'Hydro':0.8,'Thermal':1,'Nuclear':1,},
'DK':{'Thermal':1,},
}
####### ROUNDING VALUES ##############
self.opt_bound_cut = { # round values below this threshold to zero, to avoid small coefficients
'max_SOLAR':1e-4,
'max_WIND':1e-4,
'min_PG':1e-4,
}
######## FIGURE OPTIONS ##################
self.fopt_no_plots = False
self.fopt_plots = {
'gentype':True,
'gentot':True,
'gentot_bar':False,
'renewables':False,
'transfer_internal':True,
'transfer_external':True,
'reservoir':False,
'price':False,
'losses':False,
'load_curtailment':False,
'inertia':False,
'hydro_duration':False,
'wind_curtailment':False,
}
self.fopt_plot_weeks = []
self.fopt_use_titles = True
self.fopt_show_rmse = True # also show absolute RMSE on fopt_plots
self.fopt_eps = False
self.fopt_print_text = False # print model to text file
self.fopt_print_dual_text = False # print dual to text file
self.fopt_dpi_qual = 1000
# control inset in plot
self.fopt_inset_date = None
self.fopt_inset_days = 5
self.fopt_calc_rmse = { # some rmse calculations need additional data
'price':True,
'transfer':True
}
self.fopt_rmse_transfer_data_type = 'flow'
##### OPTIONS TO PRINT OUTPUT ######
self.opt_print = {
'init':True,
'solver':True,
'setup':True,
'postprocess':True,
'check':True,
}
self.default_pp_opt()
def default_pp_opt(self):
########## OPTIONS CONTROLLING POST PROCESSING ###############
self.pp_opt = EmptyObject()
self.pp_opt.get_vars = ['SPILLAGE','PG','RES','X1','X2','WIND','XEXT','LS','SOLAR','HROR','PUMP','REL','PRES']
self.pp_opt.inst_vars = ['RES','PRES']
self.pp_opt.daily_vars = ['RES','SPILLAGE'] # daily variables if opt_hydro_daily is True
# Note: duals only obtained only if the constraint exists (some constraints are optional)
self.pp_opt.get_duals = ['POWER_BALANCE','RESERVOIR_BALANCE','HVDC_RAMP','GEN_RAMP',
'RESERVES_UP','RESERVES_DW','FIX_RESERVOIR','INERTIA']
self.pp_opt.get_cur_vars = ['WIND','SOLAR','HROR']
def effective_reservoir_range(self):
# effective ranges, based on min and max reservoir values from entso-e data
self.opt_reservoir_capacity = { # GWh
'SE1':11326,
'SE2':13533,
'SE3':1790,
'SE4':180,
'FI':2952,
'NO1':6078,
'NO2':21671,
'NO3':7719,
'NO4':14676,
'NO5':14090,
'LT':11.8,
'LV':9.4,
'DE':2430,
'PL':2800,
'GB':4100,
}
def vre_cap_2016(self):
""" Set wind and solar capacities to values from 2016, for validation of model with MAF data for this year """
# SOLAR CAPACITY
self.opt_solar_cap_by_area = {
'DK1':421, # from ENTSO-E
'DK2':180,
'PL':77,
'NL':1429,
'DE':40679,
'GB':11914,
}
# if solar capacity for an area is not specified, the aggregated value
# for that country is used, weighted by the area's share of total load
self.opt_solar_cap_by_country = { # from IRENA Capacity Statistics
'DK':851,
'FI':39,
'NO':27,
'SE':153,
'LV':1,
'LT':70,
'EE':10,
}
# MAF WIND CAPACITY
self.opt_wind_capacity_onsh = {
'DK1':2966,
'DK2':608,
'EE':375,
'FI':2422,
'LT':366,
'LV':55,
'NO1':0,
'NO2':261,
'NO3':361,
'NO4':251,
'NO5':0,
'SE1':524,
'SE2':2289,
'SE3':2098,
'SE4':1609,
'PL':5494,
'NL':3284,
'DE':45435,
'GB':10833,
}
self.opt_wind_capacity_offsh = {
'DK1':843,
'DK2':428,
'EE':0,
'FI':0,
'LT':0,
'LV':0,
'NO1':0,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'SE1':0,
'SE2':0,
'SE3':0,
'SE4':0,
'PL':0,
'NL':357,
'DE':4000,
'GB':5293,
}
def run(self,save_model=False):
""" Run single case of model, for current settings """
self.res_time = {}
t_0 = time.time()
self.setup()
self.res_time['pre'] = time.time() - t_0
t__0 = time.time()
self.setup_child_model()
self.res_time['cm'] = time.time() - t__0
self.solve()
t__0 = time.time()
self.post_process()
self.res_time['post'] = time.time() - t__0
self.res_time['tot'] = time.time() - t_0
if save_model:
self.save_model()
def run_years(self,years=range(2015,2017),append=False,save_full_model=False):
""" Run model using weather data for multiple years between start and end
save_full_model: Save full model using save_model for start year in root path
"""
start = years[0]
self.opt_weather_year = start
self.update_path(self.root_path/f'{start}')
# run first instance of model
self.run()
self.save_model_run(append=append)
if save_full_model:
self.update_path(self.root_path)
self.save_model()
# update weather data and run remaining instances
for year in years[1:]:
self.update_path(self.root_path/f'{year}')
self.re_run_year(year=year)
self.save_model_run(append=append)
def re_run_year(self,year=2015):
""" Update the weather year and re-run model """
print(f'---- RE-RUN YEAR {year} -----')
self.res_time = {}
t_0 = time.time()
self.opt_weather_year = year
self.setup_weather_indices()
self.get_inflow_data()
self.setup_inflow()
self.setup_run_of_river()
self.setup_inflow_feasibility()
self.max_HROR = {
(a,t):self.ror_hourly.at[self.timerange[t],a]*MWtoGW for a in self.ror_areas for t in self.idx_time
}
self.setup_solar()
self.setup_wind()
self.max_SOLAR = {
(a,t):self.solar.at[self.timerange[t],a] for a in self.solar_areas for t in self.idx_time
}
self.max_WIND = {
(a,t):self.wind.at[self.timerange[t],a]*self.opt_wind_scale_factor[a]
for a in self.wind_areas for t in self.idx_time
}
for name in ['max_WIND','max_SOLAR']:
self.round_bound(name)
#%%
if self.opt_run_initialization:
self.run_init_model()
#%%
self.res_time['pre'] = time.time() - t_0
t_1 = time.time()
self.cm.update_inflow()
self.cm.update_ror(self.max_HROR)
self.cm.update_solar(self.max_SOLAR)
self.cm.update_wind(self.max_WIND)
self.res_time['cm'] = time.time() - t_1
#%% rerun model
self.solve()
t_1 = time.time()
self.post_process()
self.res_time['post'] = time.time() - t_1
self.res_time['tot'] = time.time() - t_0
print(f'------ FINISHED YEAR {year} --------')
def load_results_years(self,vars=['res_PG','res_LS'],years=None):
""" Get given results for all yearly runs"""
res = {
v:{} for v in vars
}
exist_years = []
for y in [y for y in os.listdir(self.root_path) if os.path.isdir(self.root_path / y)]:
try:
exist_years.append(int(y))
except Exception:
pass
if years is None:
years = exist_years
else:
years = [y for y in exist_years if y in years]
# get results from all runs
for y in years:
self.load_model_run(y)
for v in vars:
res[v][y] = self.__getattribute__(v)
return res
def round_bound(self,name):
prt = self.opt_print['setup']
if name in self.opt_bound_cut:
thrs = self.opt_bound_cut[name]
dic = self.__getattribute__(name)
count = 0
for i,val in dic.items():
if val > 0 and val < thrs:
dic[i] = 0
count += 1
if count and prt:
print(f'Rounded {count} values to zero in {name}')
def save_model(self):
"""
Dump all model results to pickle file. Also save options, gen data etc., as well as self.runs
Can produce very large file if several runs are stored in self.runs
The valus saved are sufficient to rerun all plot functions, after first calling setup_data
"""
d = {}
save_vars = ['runs','ror_areas','generators_def','hydrores','areas','wind_areas','solar_areas','pump_res_areas',
'pump_areas','ror_reserve_areas','nuclear_areas','resareas','syncareas','gen_in_area',
'xtrans_int','xtrans_ext','rescountries','reservoir_capacity','pump_reservoir','fixed_transfer_connections',
'fixed_price_connections','area_sep_str','solar_capacity',
]
vars = [v for v in dir(self) if v.split('_',1)[0] in ['res','gen','idx','opt','fopt','dual','max','min'] or v in save_vars]
for v in vars:
d[v] = self.__getattribute__(v)
with open(self.root_path/f'results.pkl','wb') as f:
pickle.dump(d,f)
def save_model_run(self,append=False):
"""
Dump results from current model run in results.pkl
If append=True, results are also appended to list in self.runs
Storing many runs in self.runs can consume lots of memory, so it may
be better just to save the pickle files and load them when needed
"""
# save_entities = ['inflow_hourly','weeks','inflow','inflow_hourly_tmp','ror_hourly']
save_entities = []
run = {
v:self.__getattribute__(v) for v in [ v for v in dir(self) if v.split('_',1)[0] == 'res' or v in save_entities]
}
run['opt_weather_year'] = self.opt_weather_year
if append:
self.runs.append(run)
with open(self.res_path/f'results.pkl','wb') as f:
pickle.dump(run,f)
def load_model(self):
with open(self.res_path/f'results.pkl','rb') as f:
d = pickle.load(f)
for v in d:
self.__setattr__(v,d[v])
def load_model_run(self,year=2015):
self.res_path = self.root_path / f'{year}'
self.load_model()
self.res_path = self.root_path
def redo_plots(self):
print('----- REDO PLOTS -----')
self.load_model()
self.setup_indices()
self.setup_weather_indices()
self.setup_data()
self.get_rmse_data()
self.plot_figures()
def setup_child_model(self):
""" Create the Pyomo/Gorubi model object """
api = self.opt_api
solver = self.opt_solver
# Choose child model "cm" class depending on api type
if api == 'gurobi' and solver == 'gurobi':
from gurobi_model import GurobiModel
self.cm = GurobiModel(name=self.name)
else:
if api == 'gurobi':
print(f'WARNING: Can only use gurobi api with gurobi, using pyomo api!')
from pyomo_model import PyomoModel
self.cm = PyomoModel()
self.cm.setup_opt_problem(self)
def setup(self):
prt = self.opt_print['setup']
self.vars_df_up_bound = {
'WIND':['wind_areas','idx_time'],
'SOLAR':['solar_areas','idx_time'],
'LS':['areas','idx_time'],
'HROR':['ror_areas','idx_time'],
}
print('----- SETUP -------------')
self.setup_indices()
self.setup_weather_indices()
self.setup_transmission()
if prt:
print('----- SETUP DATA --------')
self.setup_data()
if prt:
print('----- SETUP GEN ---------')
self.setup_gen()
if prt:
print('----- SETUP RESERVES ----')
self.setup_reserves()
if prt:
print('----- SETUP HYDRO -------')
self.setup_hydro()
if prt:
print('----- SETUP WIND --------')
self.setup_wind()
if prt:
print('----- SETUP SOLAR -------')
self.setup_solar()
if prt:
print('----- SETUP RESERVOIR ---')
self.setup_reservoir_values()
if prt:
print('----- SETUP INFLOW ------')
self.setup_inflow()
if prt:
print('----- SETUP ROR --------')
self.setup_run_of_river()
self.setup_inflow_feasibility()
if prt:
print('----- SETUP BOUNDS -----')
self.setup_bounds()
if self.opt_run_initialization:
self.run_init_model()
print('----- SETUP COMPLETE ----')
self.print_hydro_table()
self.print_renewable_table()
def solve(self):
""" Solve model """
print(' ----- STARTING SOLVER -----')
prt = self.opt_print['solver']
solver = self.opt_solver
if not hasattr(self,'cm'):
print('Model does not have child model, run "setup_child_model"')
return None
elif self.cm.api == 'pyomo': # pyomo model
## DECLARE DUAL
if not hasattr(self.cm,'dual'):
self.cm.dual = pye.Suffix(direction=pye.Suffix.IMPORT)
## SOLVE MODEL
if solver in solver_executables: # give explicit solver path
opt = pye.SolverFactory(solver,executable=solver_executables[solver],options=self.opt_solver_opts)
else:
opt = pye.SolverFactory(solver,options=self.opt_solver_opts)
res = opt.solve(self.cm, tee=prt)
if 'Time' in res['solver'][0]:
self.res_time['solver'] = res['solver'][0]['Time']
else:
self.res_time['solver'] = np.nan
self.res_stats = {
name:res['problem'][0][solver_stats['pyomo'][name]] for name in solver_stats['pyomo']
}
else: # gurobi model
if not prt:
self.cm.gm.setParam('OutputFlag',0)
self.cm.gm.optimize()
self.res_time['solver'] = self.cm.gm.Runtime
self.res_stats = {
name:self.cm.gm.getAttr(solver_stats['gurobi'][name]) for name in solver_stats['gurobi']
}
print(' ----- FINISHED SOLVER -----')
def post_process(self):
""" Post-processing of optimization results and plotting of figures """
print('----- POST PROCESS ------')
prt = self.opt_print['postprocess']
############### RESULTS ##########################
self.res_residuals = {} # residuals to check supply == demand
self.res_rmse_area = pd.DataFrame(dtype=float,index=self.areas,columns=['Prod','Hydro','Thermal','Nuclear','Price'])
self.res_rmse_intcon = pd.DataFrame(index=self.xtrans_int.index,columns=['From','To','RMSE'])
# self.res_rmse_intcon.loc[:,['From','To']] = self.xtrans_int.loc[:,['from','to']]
self.res_rmse_intcon['From'] = self.xtrans_int['from']
self.res_rmse_intcon['To'] = self.xtrans_int['to']
self.res_rmse_extcon = pd.DataFrame(index=self.xtrans_ext.index,columns=['From','To','RMSE'])
# self.res_rmse_extcon.loc[:,['From','To']] = self.xtrans_ext.loc[:,['from','to']]
self.res_rmse_extcon['From'] = self.xtrans_ext['from']
self.res_rmse_extcon['To'] = self.xtrans_ext['to']
self.res_rmse_area_norm = self.res_rmse_area.copy()
self.res_rmse_intcon_norm = pd.Series(index=self.xtrans_int.index)
self.res_rmse_extcon_norm = pd.Series(index=self.xtrans_ext.index)
# if given path, override
self.get_df_bounds()
self.get_results_from_child()
self.get_rmse_data()
if prt:
print('----- POST CALC. -------')
self.post_process_calculations()
# some more curtailment stats
print('----- PLOT FIGURES -----')
self.plot_figures()
#self.plot_offer_curves(self.supply_curve_hour)
self.print_rmse()
# writing output takes too long for large models
if self.fopt_print_dual_text:
with open(self.res_path / 'dual.txt','w') as f:
with redirect_stdout(f):
self.dual.display()
if self.fopt_print_text:
with open(self.res_path / 'model.txt','w') as f:
with redirect_stdout(f):
self.pprint()
if self.opt_print['check']:
print('----- CHECK RESULTS ----')
print(f'Maximum residual: {max([self.res_residuals[area] for area in self.res_residuals])}')
print(f'Average losses: {np.mean(self.res_losses):0.4f} %')
print('Errors:')
print(f'Production: {self.res_rmse_area["Prod"].mean():0.4f}')
print(f'Hydro: {self.res_rmse_area["Hydro"].mean():0.4f}')
print(f'Thermal: {self.res_rmse_area["Thermal"].mean():0.4f}')
print(f'Transfer: {self.res_rmse_intcon["RMSE"].mean():0.4f}')
print(f'External transfer: {self.res_rmse_extcon["RMSE"].mean():0.4f}')
print(f'Price: {self.res_rmse_area["Price"].mean():0.4f}')
def run_init_model(self):
t_0 = time.time()
prt = self.opt_print['init']
if prt:
print('------- RUN INIT MODEL --------')
self.setup_init_model()
self.solve_init_model()
self.postprocess_init_model()
if prt:
print('------- INIT MODEL COMPLETE ---')
self.res_time['ini'] = time.time() - t_0
def setup_init_model(self):
"""
This sets up a low resolution model, which is solved to get values with which to initialize the hourly model
:return:
"""
from pyomo_init_model import PyomoInitModel
print_output = self.opt_print['init']
self.ini = EmptyObject()
t0 = time.time()
delta = self.opt_init_delta
if print_output:
print(f'Time step of {delta} hours')
self.timerange_lr = [self.timerange[t] for t in range(0,self.nPeriods,delta)]
# delta = 168 # resolution of model
# compute number of periods
self.nPeriods_lr = int(np.ceil(self.nPeriods / delta))
# map hour indices to periods
p2i = {}
i2p = {}
for pidx in range(self.nPeriods_lr):
# h2p[i] = range()
p2i[pidx] = range(pidx*delta,min((pidx+1)*delta,self.nPeriods))
for pidx in p2i:
for i in p2i[pidx]:
i2p[i] = pidx
self.p2i = p2i
self.i2p = i2p
entities = [e for e in ['solar','wind','exchange','exchange_capacity','demand','ror_hourly','inflow_hourly'] \
if hasattr(self,e)]
for name in entities:
df = self.__getattribute__(name)
self.__setattr__(f'{name}_lr',df.resample(f'{delta}H').mean())
if self.opt_use_var_cost:
self.gen_c1_lr = self.gen_c1.resample(f'{delta}H').mean()
self.gen_c2_lr = self.gen_c2.resample(f'{delta}H').mean()
# interpolate reservoir values for initialization
self.reservoir_interp_lr = interp_time(self.timerange_lr,self.reservoir_fix)
self.setup_bounds(lowres=True)
self.cmi = PyomoInitModel()
self.cmi.setup_opt_problem(self)
self.ini.time_setup = time.time() - t0
def solve_init_model(self):
print_output = self.opt_print['init']
solver = self.opt_solver
self.cmi.dual = pye.Suffix(direction=pye.Suffix.IMPORT)
## SOLVE MODEL
if solver in solver_executables:
opt = pye.SolverFactory(solver,executable=solver_executables[solver])
else:
opt = pye.SolverFactory(solver)
t0 = time.time()
self.ini.res = opt.solve(self.cmi, tee=print_output)
self.ini.time_solve = time.time() - t0
def postprocess_init_model(self):
print_output = self.opt_print['init']
t0 = time.time()
""" Get result variables, duals, and bounds from optimization problem """
mo = self.ini
mo.obj = self.cmi.get_objective_value()
# read results into dataframes
if print_output:
print('Reading results into Panda data frames')
for v in self.pp_opt.get_vars:
entity = self.cmi.get_variable(f'var_{v}')
# convert to date index
entity.index = self.timerange_lr
mo.__setattr__(f'res_{v}',entity)
# increment time index of instantaneous variables
for var in [v for v in self.pp_opt.inst_vars if v in self.pp_opt.get_vars]:
entity = mo.__getattribute__(f'res_{var}')
entity.index += datetime.timedelta(hours=self.opt_init_delta)
# get dual variables
if print_output:
print('Getting dual variables')
for v in self.pp_opt.get_duals:
constr = f'constr_{v}'
if hasattr(self.cmi,constr):
entity = self.cmi.get_dual(constr)
# convert to date index
if v not in ['FIX_RESERVOIR']:
entity.index = self.timerange_lr
mo.__setattr__(f'dual_{constr}',entity)
# dic[f'dual_{constr}'] = entity
# interpolate reservoir values
mo.reservoir_interp = pd.DataFrame(dtype=float,index=self.timerange_p1,columns=self.hydrores)
mo.reservoir_interp.loc[self.timerange[0],:] = self.reservoir_fix.loc[self.timerange[0],:]
mo.reservoir_interp.loc[self.timerange_p1[-1],:] = self.reservoir_fix.loc[self.timerange_p1[-1],:]
mo.reservoir_interp.loc[mo.res_RES.index[:-1],:] = np.array(mo.res_RES.loc[mo.res_RES.index[:-1],:])
mo.reservoir_interp.interpolate(inplace=True)
mo.time_post = time.time() - t0
def setup_indices(self):
prt = self.opt_print['setup']
self.starttime = self.opt_start + ':00'
self.endtime = (str_to_date(self.opt_end) + datetime.timedelta(hours=24)).strftime('%Y%m%d') + ':00'
# defined quantities from options
self.areas = []
for c in self.opt_countries:
for a in country_to_areas[c]:
self.areas.append(a)
self.single_area_countries = [c for c in self.opt_countries if country_to_areas[c].__len__() == 1]
self.multi_area_countries = [c for c in self.opt_countries if country_to_areas[c].__len__() > 1]
self.syncareas = [a for a in self.areas if a in synchronous_areas]
self.country_to_areas = { # country to areas for countries included in model
c:country_to_areas[c] for c in self.opt_countries
}
self.area_to_country = {
a:area_to_country[a] for a in self.areas
}
self.hydrores = [area for area in self.areas if 'Hydro' in generators_def[area]]
# note: period with index 0 is starting period
# self.start = str_to_date(self.starttime) + datetime.timedelta(hours=-1)
# self.end = str_to_date(self.endtime)
self.timerange = pd.date_range(start=str_to_date(self.starttime),
end=str_to_date(self.endtime)+datetime.timedelta(hours=-1),freq='H')
self.timerange_p1 = pd.date_range(start=str_to_date(self.starttime),
end=str_to_date(self.endtime),freq='H')
self.nPeriods = self.timerange.__len__()
self.idx_time = range(self.nPeriods)
# day_fmt = '%Y%m%d'
self.daysrange_p1 = pd.date_range(start=self.timerange_p1[0],end=self.timerange_p1[-1],freq='D')
self.daysrange = self.daysrange_p1[:-1]
self.nDays = self.daysrange_p1.__len__() - 1
self.idx_day = range(self.nDays)
# map hours to days
self.hour2day = {
t:int(np.floor_divide(t,24)) for t in self.idx_time
}
self.day2hour = {
d:[t for t in self.idx_time if self.hour2day[t] == d] for d in self.idx_day
}
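# e.g. hour2day[25] == 1 and day2hour[0] == [0, 1, ..., 23]; these maps are intended for
# aggregating hourly quantities to days (e.g. for the daily constraints under opt_hydro_daily).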
#%% set start/end time for weather data (wind,solar,hydro)
start_year = int(self.opt_start[:4])
self.start_year = start_year
def setup_weather_indices(self):
""" Setup indices related to weather year, which effects the inflow data and (for the MAF data and Merra solar data)
the wind and solar production
"""
start_year = self.start_year
# check that weather year is within maf range
if (self.opt_use_maf_inflow or self.opt_use_maf_pecd) and \
(self.opt_weather_year > 2016 or self.opt_weather_year < 1982):
print(f'WARNING: No maf data for {self.opt_weather_year}, setting weather year to 2016')
self.opt_weather_year = 2016
self.weather_year_diff = self.opt_weather_year - start_year
sfmt = '%Y%m%d:%H'
self.starttime2 = (datetime.datetime(year=start_year+self.weather_year_diff,
month=int(self.starttime[4:6]),
day=int(self.starttime[6:8]))).strftime(sfmt)
self.endtime2 = (datetime.datetime(year=int(self.endtime[:4])+self.weather_year_diff,
month=int(self.endtime[4:6]),
day=int(self.endtime[6:8]))).strftime(sfmt)
# week to date conversion
if self.opt_use_maf_inflow:
# maf data starts on Sunday
self.wd = WeekDef(week_start=7,proper_week=False)
else:
# use ISO week definition
self.wd = WeekDef(week_start=4,proper_week=True)
# get week range covering whole simulation period
# Note: since the simulated year may be a leap year, we may need one more day of data, hence get an extra week
start = self.starttime2
end = (datetime.datetime.strptime(self.endtime2,sfmt)+datetime.timedelta(days=7)).strftime(sfmt)
self.weeks = self.wd.range2weeks(start,end,sout=True)
self.widxs = self.wd.range2weeks(start,end,sout=False)
# find index offset, we will interpolate inflow data for the whole range in weeks/widxs
# and then use df[inflow_offset:inflow_offset+nPeriods] as the inflow data
self.inflow_offset = int((datetime.datetime.strptime(self.starttime2,sfmt)-self.widxs[0]).seconds/3600)
dstart = str_to_date(self.starttime2)
self.daysrange_weather_year = pd.date_range(start=dstart,end=dstart+datetime.timedelta(days=self.nDays-1),freq='D')
def get_inflow_data(self):
if self.opt_use_maf_inflow: # get MAF inflow data
# get both weekly and daily maf inflow data
self.inflow,self.inflow_daily_maf = self.maf_hydro_db.select_inflow_bidz_wrap(starttime=self.weeks[0],
endtime=self.weeks[-1],
areas=self.hydrores,
wd=self.wd,date_index=True)
else: # entsoe inflow
self.inflow = self.inflow_db.select_inflow_data(starttime=self.weeks[0],
endtime=self.weeks[-1],
areas=self.hydrores,
table='inflow',wd=self.wd,date_index=True)
def db_exists(self,db='prices.db'):
# Check that database exists
if not os.path.isfile(self.data_path / db):
raise Error(f"Database {db} does not exist!")
def setup_data(self):
prt = self.opt_print['setup']
# Check that databases exist
for db in [f for f in self.opt_db_files if f != 'unit']:
self.db_exists(self.db_path / self.opt_db_files[db])
self.price_db = entsoe.Database(db=self.db_path / self.opt_db_files['prices'])
self.exchange_db = entsoe.Database(db=self.db_path / self.opt_db_files['exchange'])
self.load_db = entsoe.Database(db=self.db_path / self.opt_db_files['load'])
self.reservoir_db = entsoe.Database(db=self.db_path / self.opt_db_files['reservoir'])
self.inflow_db = entsoe.Database(db=self.db_path / self.opt_db_files['inflow'])
self.gen_db = entsoe.Database(db=Path(self.db_path) / self.opt_db_files['gen'])
self.maf_pecd_db = maf_pecd_data.Database(db=Path(self.db_path) / self.opt_db_files['maf_pecd'])
self.maf_hydro_db = maf_hydro_data.Database(db=Path(self.db_path) / self.opt_db_files['maf_hydro'])
self.capacity_db = entsoe.Database(db=Path(self.db_path) / self.opt_db_files['capacity'])
if self.opt_nucl_individual_units:
self.db_exists(self.db_path / self.opt_db_files['unit'])
self.unit_db = entsoe.DatabaseGenUnit(db=Path(self.db_path)/self.opt_db_files['unit'])
starttime = self.starttime
endtime = self.endtime
cet = False
if prt:
print('Loading Excel data')
self.load_shares = pd.read_excel(self.data_path / f'load_shares.xlsx',index_col=0,squeeze=True)
for a in self.areas:
if a not in self.load_shares.index:
self.load_shares.at[a] = 1
# load generation statistics
self.stats = pd.read_excel(self.data_path / 'gen_stats.xlsx',header=[0,1],index_col=0,sheet_name=f'{self.opt_capacity_year}')
# load entsoe capacities
self.gen_capacity = self.capacity_db.select_capacity_wrap(areas=self.areas,year=self.opt_capacity_year)
if prt:
print('Loading GenPerType data')
# Used to plot generation per type and to complement missing Nordpool data
# aggregate hydro and thermal generation
self.entsoe_data = self.gen_db.select_gen_per_type_wrap_v2(starttime=starttime,endtime=endtime,
type_map=entsoe_type_map,cet_time=cet,drop_data=False,
areas=self.areas,print_output=prt,drop_pc=95)
if prt:
print('Loading demand data')
# demand data
self.demand = self.load_db.select_load_wrap(starttime=starttime,endtime=endtime,cet_time=cet,areas=self.areas,print_output=prt)
# reservoir content
self.reservoir = self.reservoir_db.select_reservoir_wrap(starttime=starttime,endtime=endtime,
areas=self.areas,cet_time=cet,normalize=self.opt_reservoir_data_normalized,offset=self.opt_reservoir_offset)
# Load production data for individual units
if self.opt_nucl_individual_units:
self.prod_per_unit,self.units = self.unit_db.select_data(start=starttime,end=endtime,
countries=[c for c in self.opt_countries if \
sum([1 for a in country_to_areas[c] if a in self.opt_nucl_individual_units])])
if prt:
print('Loading external price data')
# price data - only needed for external areas with variable transfer
self.price_external = self.price_db.select_price_data(starttime=starttime,endtime=endtime,cet_time=cet,
areas=self.opt_set_external_price)
# get flows for fixed connections
if prt:
print('Loading external exchange data')
self.exchange = self.exchange_db.select_flow_data( \
connections=list(self.xtrans_ext.loc[self.fixed_transfer_connections,'label_fw']),
starttime=starttime,
endtime=endtime,
table=self.opt_exchange_data_type,
cet_time=cet,
area_sep=self.area_sep_str)
if prt:
print('Loading exchange capacities')
# load exchange capacity
if self.opt_use_var_exchange_cap:
self.exchange_capacity = self.exchange_db.select_flow_data(table='capacity',area_sep=self.area_sep_str,cet_time=cet,
starttime=starttime,endtime=endtime,print_output=prt,
connections=list(self.xtrans_int['label_fw'])+list(self.xtrans_int['label_bw']) + \
list(self.xtrans_ext.loc[self.fixed_price_connections,'label_fw'])+ \
list(self.xtrans_ext.loc[self.fixed_price_connections,'label_bw']),
drop_na_col=True)
if prt:
print('Loading inflow data')
self.get_inflow_data()
impute_list = ['reservoir','inflow','demand','price_external','exchange']
# if self.opt_use_var_exchange_cap:
# self.impute_capacity_values()
# interpolate missing values in data
self.impute_values(impute_list,limit=self.opt_impute_limit,prt=prt)
# scale up demand
self.demand = self.demand * self.opt_load_scale
# replace negative values with zeros
for name in self.opt_nonnegative_data:
entity = self.__getattribute__(name)
entity.clip(0,inplace=True)
def setup_gen(self):
prt = self.opt_print['setup']
if prt:
print('Setting up generators')
stats = self.stats
self.generators_def = generators_def
# get generator data
nGen = 0
gidx = 1
self.gen_data = pd.DataFrame(index=range(1,nGen+1),
columns=['area','gtype','c2','c1','c0','pmax','pmin','rampup','rampdown'])
# load cost fit
with open(self.data_path/f'costfit/{self.opt_costfit_tag}_fit.pkl','rb') as f:
self.costfit = pickle.load(f)
for area in self.areas:
for gtype in self.generators_def[area]:
if (gtype == 'Hydro' and self.opt_pmax_type_hydro == 'stats') or \
(gtype != 'Hydro' and self.opt_pmax_type == 'stats'):
pmax = stats.at['max',(area,gtype)]
else:
pmax = self.gen_capacity.at[area,gtype]
if np.isnan(pmax): # missing values, use from stats
pmax = stats.at['max',(area,gtype)]
if prt:
print(f'No entso-e capacity value for {area} {gtype}')
pmin = stats.at['min',(area,gtype)]
if pmin > pmax: # pmin value from production stats may exceed capacity, fix this
pmin = 0
rampup = stats.at['maxramp',(area,gtype)]
rampdown = stats.at['minramp',(area,gtype)]
# cost coefficients
c0 = 0
if gtype == 'Nuclear':
c2 = 0
c1 = self.opt_nuclear_cost
elif gtype == 'Hydro':
c2 = 0
c1 = 0
else: # Thermal
c2 = self.costfit[area][gtype]['k']/2
c1 = self.costfit[area][gtype]['mavg']
if self.opt_co2_price is not None and self.opt_co2_price > 0:
c1 += self.opt_co2_price*self.opt_co2_intensity
if self.opt_co2_price_offset_year is not None:
c1 -= co2_price_ets[self.opt_co2_price_offset_year]*self.opt_co2_intensity
# check if cost parameters are strange, e.g. decreasing marginal cost
if c2 < 0 or np.isnan(c2):
c2 = 0
c1 = self.opt_default_thermal_cost
if prt:
print(f'Using default constant MC costs for {area} {gtype}')
self.gen_data = self.gen_data.append(pd.DataFrame(columns=self.gen_data.columns,index=[gidx],
data=[[area,gtype,c2,c1,c0,pmax,pmin,rampup,rampdown]]))
gidx += 1
self.nGen = gidx - 1
self.idx_gen = range(1,self.nGen+1)
self.idx_thermal_gen = [g for g in self.idx_gen if self.gen_data.at[g,'gtype'] == 'Thermal']
# generators with non-zero marginal cost
self.idx_cost_gen = [g for g in self.idx_gen if not self.gen_data.at[g,'gtype'] == 'Hydro']
if self.opt_pmin_zero:
self.gen_data.loc[:,'pmin'] = 0
# set maximum nuclear capacity based on this week
for g in self.gen_data.index:
if self.gen_data.at[g,'gtype'] == 'Nuclear':
# note: this is maximum for whole period, is actually not used
# instead weekly maximum values are used
self.gen_data.at[g,'pmax'] = self.entsoe_data[self.gen_data.at[g,'area']]['Nuclear'].max()
# overwrite nuclear cost
if not self.opt_nuclear_cost is None:
self.gen_data.at[g,'c1'] = self.opt_nuclear_cost
# overwrite nuclear ramp rate
if not self.opt_nucl_ramp is None:
self.gen_data.at[g,'rampup'] = self.gen_data.at[g,'pmax'] * self.opt_nucl_ramp/100
self.gen_data.at[g,'rampdown'] = - self.gen_data.at[g,'pmax'] * self.opt_nucl_ramp/100
def tag_gen_cost():
pass
if prt:
print('Setting up generator variable costs')
# generator variable costs
if self.opt_use_var_cost:
self.gen_c2 = pd.DataFrame(dtype=float,index=self.timerange,columns=self.gen_data.index)
self.gen_c1 = pd.DataFrame(dtype=float,index=self.timerange,columns=self.gen_data.index)
for g in self.idx_gen:
area = self.gen_data.at[g,'area']
gtype = self.gen_data.at[g,'gtype']
print_flag_areas = []
binstart = str_to_date(self.costfit['starttime'])
binsize = self.costfit['binsize']
for t in self.idx_time:
# it is assumed costs are fitted for correct year
# get costs depending on type
if gtype == 'Thermal': # variable cost data
dt = self.timerange[t]
c2 = self.costfit[area][gtype]['k']/2
c1 = self.costfit[area]['Thermal']['m'][time_to_bin(
dt,binstart=binstart,binsize=binsize)]
if self.opt_co2_price is not None and self.opt_co2_price > 0:
c1 += self.opt_co2_price*self.opt_co2_intensity
if self.opt_co2_price_offset_year is not None:
c1 -= co2_price_ets[self.opt_co2_price_offset_year]*self.opt_co2_intensity
if self.opt_overwrite_bad_costfits and (c2 < self.opt_c2_min or c2 > self.opt_c2_max):
# use default cost
c2 = self.gen_data.at[g,'c2']
c1 = self.gen_data.at[g,'c1']
# show message about overwrite
if area not in print_flag_areas:
print_flag_areas.append(area)
if prt:
print(f'Using constant costs for {area}')
else: # use constant costs from gen_data
c2 = self.gen_data.at[g,'c2']
c1 = self.gen_data.at[g,'c1']
self.gen_c2.at[self.timerange[t],g] = c2
self.gen_c1.at[self.timerange[t],g] = c1
# calculate maximum nuclear generation per week
# USE INDIVIDUAL NUCLEAR GENERATION DATA
def tag_nuclear():
pass
if prt:
print('Setting up nuclear generation')
self.nuclear_areas = [a for a in self.areas if 'Nuclear' in self.generators_def[a]]
for a in self.nuclear_areas:
if a not in self.opt_nucl_add_cap:
self.opt_nucl_add_cap[a] = 0.0
#%%
# variable nuclear limit
self.nuclear_hourly = pd.DataFrame(dtype=float,index=self.timerange_p1,columns=self.nuclear_areas)
# fix values for 1 day intervals, the do linear interpolation
self.nuclear_units = {}
for a in self.nuclear_areas:
if a in self.opt_nucl_individual_units:
self.nuclear_units[a] = [idx for idx in self.units.index if self.units.at[idx,'type'] == 'Nuclear'
and self.units.at[idx,'country'] == self.area_to_country[a]
and self.units.at[idx,'name'] not in self.opt_nucl_units_exclude]
max_rolling = self.prod_per_unit.loc[:,self.nuclear_units[a]].sum(axis=1).rolling(
window=168,min_periods=1,center=True).max()
else:
max_rolling = self.entsoe_data[a]['Nuclear'].rolling(window=168,min_periods=1,center=True).max()
for d in self.daysrange_p1:
self.nuclear_hourly.at[d,a] = max_rolling.at[d] + self.opt_nucl_add_cap[a]
# interpolate linearly
self.nuclear_hourly.interpolate(inplace=True)
# combined generators - define which generator units make up generators with ramp constraints
# Note: Several units of the same type can be used within an area, e.g. in order to create a piecewise linear
# cost function for that type and area. Some constraints, e.g. ramp constraints, should then be enforced on
# the aggregate production of those generators. For this reason there is a set for the "combined generators"
self.gen_comb = {}
idx = 1
for area in self.areas:
for gtype in self.generators_def[area]:
# find all generator units which belong to this generator
units = []
for gen in self.gen_data.index:
if self.gen_data.at[gen,'area'] == area and self.gen_data.at[gen,'gtype'] == gtype:
units.append(gen)
self.gen_comb[idx] = units
idx += 1
self.nGenComb = idx-1
self.gen_data_comb = pd.DataFrame(index=range(1,self.nGenComb+1),columns=['rampup','rampdown'])
for index in self.gen_data_comb.index:
self.gen_data_comb.at[index,'rampup'] = self.gen_data.at[self.gen_comb[index][0],'rampup']
self.gen_data_comb.at[index,'rampdown'] = self.gen_data.at[self.gen_comb[index][0],'rampdown']
# generators in each area
self.gen_in_area = {}
for area in self.areas:
self.gen_in_area[area] = [g for g in range(1,self.nGen+1) if self.gen_data.at[g,'area'] == area]
def setup_reserves(self):
prt = self.opt_print['setup']
for c in self.opt_countries:
if c not in self.opt_reserves_fcrn:
self.opt_reserves_fcrn[c] = 0
# amount of reserves per area
self.reserve_data = pd.DataFrame(index=self.areas,columns=['FCR-N','FCR-D','Rp','Rn'])
for area in self.reserve_data.index:
if 'SE' in area:
country = 'SE'
elif 'NO' in area:
country = 'NO'
elif 'DK' in area:
country = 'DK'
else:
country = area
#all_areas = [a for a in self.areas if country in a]
gen_hydro = [idx for idx in self.gen_data.index if self.gen_data.at[idx,'gtype'] == 'Hydro']
gen_area = [idx for idx in self.gen_data.index if self.gen_data.at[idx,'area'] == area]
gen_country = [idx for idx in self.gen_data.index if country in self.gen_data.at[idx,'area']]
# allocate in proportion to share of hydro generation
if self.gen_data.loc[intersection(gen_hydro,gen_country),'pmax'].sum() > 0:
self.reserve_data.at[area,'FCR-N'] = self.opt_reserves_fcrn[country]* \
self.gen_data.loc[intersection(gen_hydro,gen_area),'pmax'].sum() / self.gen_data.loc[intersection(gen_hydro,gen_country),'pmax'].sum()
else:
self.reserve_data.at[area,'FCR-N'] = 0
# FCR-D in proportion to FCR-N
self.reserve_data.at[area,'FCR-D'] = self.reserve_data.at[area,'FCR-N'] * self.opt_reserves_fcrd / np.sum([self.opt_reserves_fcrn[a] for a in self.opt_reserves_fcrn])
self.reserve_data.at[area,'Rp'] = self.reserve_data.at[area,'FCR-N'] + self.reserve_data.at[area,'FCR-D']
self.reserve_data.at[area,'Rn'] = self.reserve_data.at[area,'FCR-N']
# areas with reserves
self.resareas = [area for area in self.areas if self.reserve_data.at[area,'FCR-N'] > 0]
# generators providing reserves in each area
self.reserve_gens = {}
for area in self.resareas:
self.reserve_gens[area] = [gen for gen in self.gen_data.index if self.gen_data.at[gen,'area'] == area and self.gen_data.at[gen,'gtype'] == 'Hydro']
# countries with reserves
self.rescountries = [c for c in self.opt_reserves_fcrn if self.opt_reserves_fcrn[c] > 0]
# generators providing reserves in each country
self.reserve_gens_country = {}
for c in self.rescountries:
self.reserve_gens_country[c] = [gen for gen in self.gen_data.index if self.gen_data.at[gen,'area'] in self.country_to_areas[c] and self.gen_data.at[gen,'gtype'] == 'Hydro']
self.reserve_country_data = pd.DataFrame(index=self.rescountries,columns=self.reserve_data.columns)
for c in self.reserve_country_data.columns:
for i in self.reserve_country_data.index:
self.reserve_country_data.at[i,c] = self.reserve_data.loc[self.country_to_areas[i],c].sum()
def setup_hydro(self):
prt = self.opt_print['setup']
self.reservoir_capacity = self.opt_reservoir_capacity.copy()
# get missing reservoir capacity as maximum reservoir value
self.reservoir_max = self.reservoir_db.select_max(table_type='reservoir',areas=self.areas)
for a in self.hydrores:
if a not in self.reservoir_capacity or not self.reservoir_capacity[a]:
self.reservoir_capacity[a] = self.reservoir_max.at[a]
self.pump_reservoir = self.opt_pump_reservoir.copy()
# reservoir content in TWh
for i,val in self.reservoir_capacity.items():
self.reservoir_capacity[i] = val * GWtoTW
for i,val in self.pump_reservoir.items():
self.pump_reservoir[i] = val * GWtoTW
# One reservoir per area with hydro
# Mapping from each reservoir to its connected hydro stations
self.reservoir2hydro = {}
for area in self.hydrores:
self.reservoir2hydro[area] = []
for idx,gen in self.gen_data.iterrows():
if gen['area'] == area and gen['gtype'] == 'Hydro':
self.reservoir2hydro[area].append(idx)
for a in self.hydrores:
if a not in self.opt_ror_fraction:
self.opt_ror_fraction[a] = 0
# areas with run of river hydro
self.ror_areas = [a for a in self.areas if a in self.opt_ror_fraction and self.opt_ror_fraction[a] > 0]
self.ror_countries = [c for c in self.opt_countries if sum([1 for a in self.country_to_areas[c] if a in self.ror_areas])]
# check which areas have hydro run of river with reserves
# Note: Run of river hydro is modelled as separate production (like wind and solar),
# and is not entered within the set of generators. However, when enforcing upward reserve constraints,
# run of river production still decreases the potential for providing upward reserves from hydro production.
# Thus the areas and countries below are tracked so that run of river production can be
# accounted for in the upward reserve constraints.
self.ror_reserve_areas = []
for a in self.resareas:
#if a in self.set_HYDRO_AREA:
if self.area_to_country[a] in self.ror_countries and 'Hydro' in self.generators_def[a]:
self.ror_reserve_areas.append(a)
self.ror_reserve_countries = []
for c in self.rescountries:
if c in self.ror_countries:
self.ror_reserve_countries.append(c)
# HYDRO GENERATORS
# store data specific to hydro reservoirs in self.hydro_data
self.hydro_data = pd.DataFrame(index=self.hydrores,columns=['reservoir','waterval'])
# reservoir capacity
for area in self.hydro_data.index:
self.hydro_data.at[area,'reservoir'] = self.reservoir_capacity[area]
# PUMP HYDRO
# areas with pumping
self.pump_areas = [
a for a in self.hydrores if a in self.opt_pump_capacity and self.opt_pump_capacity[a] > 0
]
# areas with separate reservoir for pumping
self.pump_res_areas = [a for a in self.pump_areas \
if a in self.opt_pump_reservoir and self.opt_pump_reservoir[a] > 0]
# areas with pumping in inflow reservoir
self.pump_nores_areas = [a for a in self.pump_areas if a not in self.pump_res_areas]
def setup_run_of_river(self):
prt = self.opt_print['setup']
self.ror_hourly = pd.DataFrame(dtype=float,index=self.timerange,columns=self.ror_areas)
for area in self.ror_areas:
self.ror_hourly[area] = self.opt_ror_fraction[area] * \
self.inflow_hourly.loc[self.timerange,area] * GWtoMW
if self.opt_hydro_daily:
self.ror_daily = self.ror_hourly.resample('D').sum()
def setup_inflow_feasibility(self):
for area in self.hydrores:
hgens = [g for g in self.idx_gen if self.gen_data.at[g,'area'] == area and self.gen_data.at[g,'gtype'] == 'Hydro']
pmin = self.gen_data.loc[hgens,'pmin'].sum()
if area in self.ror_areas:
minprod = np.array(self.ror_hourly[area])
minprod[minprod <= pmin] = pmin
minprod_tot = np.sum(minprod)*MWtoGW
else:
minprod_tot = pmin*self.nPeriods*MWtoGW
# hydro may also have to keep negative reserves
if self.opt_use_reserves and not self.opt_country_reserves:
# TODO: Factor 1 should be enough??
minprod_tot += self.reserve_data.at[area,'Rn']*MWtoGW*self.nPeriods*2
res_incr = TWtoGW*(self.reservoir_fix.at[self.reservoir_fix.index[1],area] - self.reservoir_fix.at[self.reservoir_fix.index[0],area])
inflow_tot = self.inflow_hourly[area].sum()
if minprod_tot + res_incr > inflow_tot:
incr_val = (minprod_tot + res_incr - inflow_tot)*1.01/self.nPeriods
print(f'WARNING: Total inflow for {area} cannot satisfy minimum production and start/end reservoir values'
+ f'\nIncreasing inflow by {incr_val:0.3f} GWh/h to avoid infeasibility!')
self.inflow_hourly[area] = self.inflow_hourly[area] + incr_val
if self.opt_hydro_daily:
self.inflow_daily[area] = self.inflow_daily[area] + incr_val*24
def setup_wind(self):
prt = self.opt_print['setup']
# wind data
if self.opt_use_maf_pecd:
self.wind_areas = [a for a in self.areas if self.opt_wind_capacity_onsh[a] or self.opt_wind_capacity_offsh[a]]
self.maf_pecd_onsh_areas = list(set([bidz2maf_pecd[a] for a in self.wind_areas if self.opt_wind_capacity_onsh[a]]))
self.maf_pecd_offsh_areas = list(set([bidz2maf_pecd[a] for a in self.wind_areas if self.opt_wind_capacity_offsh[a]]))
#% get maf wind data
self.onsh_maf = self.maf_pecd_db.select_pecd_data(starttime=self.starttime2,endtime=self.endtime2,data_type='onshore',get_areas=self.maf_pecd_onsh_areas)
self.offsh_maf = self.maf_pecd_db.select_pecd_data(starttime=self.starttime2,endtime=self.endtime2,data_type='offshore',get_areas=self.maf_pecd_offsh_areas)
# scale with capacity, add offshore and onshore
self.wind_maf_raw = pd.DataFrame(0.0,index=self.onsh_maf.index,columns=self.wind_areas)
for a in self.wind_maf_raw.columns:
ma = bidz2maf_pecd[a]
if self.opt_wind_capacity_onsh[a]:
self.wind_maf_raw[a] += self.onsh_maf[ma]*self.opt_wind_capacity_onsh[a]
if self.opt_wind_capacity_offsh[a]:
self.wind_maf_raw[a] += self.offsh_maf[ma]*self.opt_wind_capacity_offsh[a]
self.wind = self.copy_data_to_model_year(self.wind_maf_raw)*MWtoGW
else: # use Entso-e data
self.wind_areas = [a for a in self.areas if 'Wind' in self.entsoe_data[a]]
self.wind = pd.DataFrame(index=self.entsoe_data['SE3'].index,columns=self.wind_areas)
for a in self.wind_areas:
self.wind[a] = self.entsoe_data[a]['Wind'] * MWtoGW
self.impute_values(['wind'],limit=self.opt_impute_limit,prt=prt)
for a in self.wind_areas:
if a not in self.opt_wind_scale_factor:
self.opt_wind_scale_factor[a] = 1
def setup_solar(self):
prt = self.opt_print['setup']
if prt:
print('Setting up solar generation')
self.solar_capacity = {}
for c in ['SE','DK','NO','FI']:
for a in country_to_areas[c]:
self.solar_capacity[a] = self.load_shares.at[a]*self.opt_solar_cap_by_country[c]*MWtoGW
# adjust solar capacities with given values per area
for a in self.opt_solar_cap_by_area:
self.solar_capacity[a] = self.opt_solar_cap_by_area[a]*MWtoGW
if self.opt_use_maf_pecd:
self.solar_areas = [a for a in self.solar_capacity if self.solar_capacity[a] > 0 and a in self.areas]
self.maf_pecd_solar_areas = list(set([bidz2maf_pecd[a] for a in self.solar_areas]))
self.solar_maf_raw = self.maf_pecd_db.select_pecd_data(starttime=self.starttime2,endtime=self.endtime2,data_type='pv',get_areas=self.maf_pecd_solar_areas)
self.solar_maf_mapped = pd.DataFrame(dtype=float,columns=self.solar_areas,index=self.solar_maf_raw.index)
for a in self.solar_areas:
self.solar_maf_mapped[a] = self.solar_maf_raw[bidz2maf_pecd[a]]*self.solar_capacity[a]
self.solar = self.copy_data_to_model_year(self.solar_maf_mapped)
else:
self.solar_areas = [a for a in self.areas if 'Solar' in self.entsoe_data[a].columns]
# use entsoe data
self.solar = pd.DataFrame(0.0,index=self.timerange_p1,columns=self.solar_areas)
# -*- coding: utf-8 -*-
from copy import deepcopy
import warnings
from itertools import chain, combinations
from collections import Counter
from typing import Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import (pearsonr as pearsonR,
spearmanr as spearmanR,
kendalltau as kendallTau)
from tqdm.auto import tqdm
import xgboost
from sklearn.base import RegressorMixin, ClassifierMixin, ClusterMixin, TransformerMixin
from sklearn.model_selection import train_test_split, BaseCrossValidator, KFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import (r2_score as R2,
mean_squared_error as MSE,
roc_auc_score as ROCAUC,
confusion_matrix,
multilabel_confusion_matrix,
matthews_corrcoef as MCC,
explained_variance_score as eVar,
max_error as maxE,
mean_absolute_error as MAE,
mean_squared_log_error as MSLE,
mean_poisson_deviance as MPD,
mean_gamma_deviance as MGD,
)
from prodec.Descriptor import Descriptor
from prodec.Transform import Transform
from .reader import read_molecular_descriptors, read_protein_descriptors
from .preprocess import yscrambling
from .neuralnet import (BaseNN,
SingleTaskNNClassifier,
SingleTaskNNRegressor,
MultiTaskNNRegressor,
MultiTaskNNClassifier
)
pd.set_option('mode.chained_assignment', None)
def filter_molecular_descriptors(data: Union[pd.DataFrame, Iterator],
column_name: str,
keep_values: Iterable,
progress: bool = True,
total: Optional[int] = None) -> pd.DataFrame:
"""Filter the data so that the desired column contains only the desired data.
:param data: data to be filtered, either a dataframe or an iterator of chunks
:param column_name: name of the column to apply the filter on
:param keep_values: allowed values
:return: a pandas dataframe
"""
if isinstance(data, pd.DataFrame):
return data[data[column_name].isin(keep_values)]
elif progress:
return pd.concat([chunk[chunk[column_name].isin(keep_values)]
for chunk in tqdm(data, total=total, desc='Loading molecular descriptors')],
axis=0)
else:
return pd.concat([chunk[chunk[column_name].isin(keep_values)]
for chunk in data],
axis=0)
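# Example (sketch): how filter_molecular_descriptors is typically called with a chunked
# iterator. The file name 'mold2.tsv', the chunk size and the connectivity keys below are
# illustrative assumptions, not values mandated by this module.
def _example_filter_molecular_descriptors() -> pd.DataFrame:
    """Minimal usage sketch for filter_molecular_descriptors (illustrative only)."""
    keep = ['AAAAAAAAAAAAAA-1', 'BBBBBBBBBBBBBB-2']  # hypothetical connectivity identifiers
    chunks = pd.read_csv('mold2.tsv', sep='\t', chunksize=50000)  # hypothetical descriptor file
    return filter_molecular_descriptors(chunks, column_name='connectivity',
                                        keep_values=keep, progress=False)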
def model_metrics(model, y_true, x_test) -> dict:
"""Determine performance metrics of a model
Beware R2 = 1 - (Residual sum of squares) / (Total sum of squares) != (Pearson r)²
R2_0, R2_0_prime, K and k_prime are derived from
<NAME>., & <NAME>. (2010).
Predictive Quantitative Structure–Activity Relationships Modeling.
In <NAME> & <NAME> (Eds.),
Handbook of Chemoinformatics Algorithms.
Chapman and Hall/CRC.
https://www.taylorfrancis.com/books/9781420082999
:param model: model to check the performance of
:param y_true: true labels
:param x_test: testing set of features
:return: a dictionary of metrics
"""
y_pred = model.predict(x_test)
# Regression metrics
if isinstance(model, (RegressorMixin, SingleTaskNNRegressor, MultiTaskNNRegressor)):
# Slope of predicted vs observed
k = sum(xi * yi for xi, yi in zip(y_true, y_pred)) / sum(xi ** 2 for xi in y_true)
# Slope of observed vs predicted
k_prime = sum(xi * yi for xi, yi in zip(y_true, y_pred)) / sum(yi ** 2 for yi in y_pred)
# Mean averages
y_true_mean = y_true.mean()
y_pred_mean = y_pred.mean()
return {'number' : y_true.size,
'R2' : R2(y_true, y_pred) if len(y_pred) >= 2 else 0,
'MSE' : MSE(y_true, y_pred, squared=True) if len(y_pred) >= 2 else 0,
'RMSE' : MSE(y_true, y_pred, squared=False) if len(y_pred) >= 2 else 0,
'MSLE' : MSLE(y_true, y_pred) if len(y_pred) >= 2 else 0,
'RMSLE' : np.sqrt(MSLE(y_true, y_pred)) if len(y_pred) >= 2 else 0,
'MAE' : MAE(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Explained Variance' : eVar(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Max Error' : maxE(y_true, y_pred) if len(y_pred) >= 2 else 0,
                'Mean Poisson Deviance' : MPD(y_true, y_pred) if len(y_pred) >= 2 else 0,
                'Mean Gamma Deviance' : MGD(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Pearson r': pearsonR(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'Spearman r' : spearmanR(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'Kendall tau': kendallTau(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'R2_0 (pred. vs. obs.)' : 1 - (sum((xi - k_prime * yi) **2 for xi, yi in zip(y_true, y_pred)) / sum((xi - y_true_mean) ** 2 for xi in y_true)) if len(y_pred) >= 2 else 0,
'R\'2_0 (obs. vs. pred.)' : 1 - (sum((yi - k * xi) **2 for xi, yi in zip(y_true, y_pred)) / sum((yi - y_pred_mean) ** 2 for yi in y_pred)) if len(y_pred) >= 2 else 0,
'k slope (pred. vs obs.)' : k,
'k\' slope (obs. vs pred.)' : k_prime,
}
# Classification
elif isinstance(model, (ClassifierMixin, SingleTaskNNClassifier, MultiTaskNNClassifier)):
# Binary classification
if len(model.classes_) == 2:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=model.classes_).ravel()
values = {}
try:
mcc = MCC(y_true, y_pred)
values['MCC'] = mcc
except RuntimeWarning:
pass
values[':'.join(str(x) for x in model.classes_)] = ':'.join([str(int(sum(y_true == class_))) for class_ in model.classes_])
values['ACC'] = (tp + tn) / (tp + tn + fp + fn) if (tp + tn + fp + fn) != 0 else 0
values['BACC'] = (tp / (tp + fn) + tn / (tn + fp)) / 2
values['Sensitivity'] = tp / (tp + fn) if tp + fn != 0 else 0
values['Specificity'] = tn / (tn + fp) if tn + fp != 0 else 0
values['PPV'] = tp / (tp + fp) if tp + fp != 0 else 0
values['NPV'] = tn / (tn + fn) if tn + fn != 0 else 0
values['F1'] = 2 * values['Sensitivity'] * values['PPV'] / (values['Sensitivity'] + values['PPV']) if (values['Sensitivity'] + values['PPV']) != 0 else 0
if hasattr(model, "predict_proba"): # able to predict probability
y_probas = model.predict_proba(x_test)
if y_probas.shape[1] == 1:
y_proba = y_probas.ravel()
                    values['AUC 1'] = ROCAUC(y_true, y_proba)
else:
for i in range(len(model.classes_)):
y_proba = y_probas[:, i].ravel()
try:
values['AUC %s' % model.classes_[i]] = ROCAUC(y_true, y_proba)
except ValueError:
warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. '
'Stratify your folds to avoid such warning.')
values['AUC %s' % model.classes_[i]] = np.nan
# Multiclasses
else:
i = 0
values = {}
for contingency_matrix in multilabel_confusion_matrix(y_true, y_pred):
tn, fp, fn, tp = contingency_matrix.ravel()
try:
mcc = MCC(y_true, y_pred)
values['%s|MCC' % model.classes_[i]] = mcc
except RuntimeWarning:
pass
values['%s|number' % model.classes_[i]] = int(sum(y_true == model.classes_[i]))
values['%s|ACC' % model.classes_[i]] = (tp + tn) / (tp + tn + fp + fn) if (
tp + tn + fp + fn) != 0 else 0
values['%s|BACC' % model.classes_[i]] = (tp / (tp + fn) + tn / (tn + fp)) / 2
values['%s|Sensitivity' % model.classes_[i]] = tp / (tp + fn) if tp + fn != 0 else 0
values['%s|Specificity' % model.classes_[i]] = tn / (tn + fp) if tn + fp != 0 else 0
values['%s|PPV' % model.classes_[i]] = tp / (tp + fp) if tp + fp != 0 else 0
values['%s|NPV' % model.classes_[i]] = tn / (tn + fn) if tn + fn != 0 else 0
values['%s|F1' % model.classes_[i]] = 2 * values['%s|Sensitivity' % model.classes_[i]] * values[
'%s|PPV' % model.classes_[i]] / (values['%s|Sensitivity' % model.classes_[i]] + values[
'%s|PPV' % model.classes_[i]]) if (values['%s|Sensitivity' % model.classes_[i]] + values[
'%s|PPV' % model.classes_[i]]) != 0 else 0
i += 1
if hasattr(model, "predict_proba"): # able to predict probability
y_probas = model.predict_proba(x_test)
try:
values['AUC 1 vs 1'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovo")
values['AUC 1 vs All'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovr")
except ValueError:
warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. '
'Stratify your folds to avoid such warning.')
values['AUC 1 vs 1'] = np.nan
values['AUC 1 vs All'] = np.nan
return values
else:
raise ValueError('model can only be classifier or regressor.')
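# Example (sketch): computing the metrics above for a fitted regressor on a small toy
# problem. The RandomForestRegressor and the synthetic data are illustrative assumptions;
# targets are kept strictly positive so the log/Poisson/gamma metrics are defined.
def _example_model_metrics() -> dict:
    """Minimal usage sketch for model_metrics (illustrative only)."""
    from sklearn.ensemble import RandomForestRegressor
    rng = np.random.default_rng(42)
    x = pd.DataFrame(rng.normal(size=(100, 5)), columns=[f'f{i}' for i in range(5)])
    y = (x ** 2).sum(axis=1) + 5.0  # strictly positive target
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1234)
    reg = RandomForestRegressor(n_estimators=50, random_state=1234).fit(x_train, y_train)
    return model_metrics(reg, y_test.values, x_test)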
def crossvalidate_model(data: pd.DataFrame,
model: Union[RegressorMixin, ClassifierMixin],
folds: BaseCrossValidator,
groups: List[int] = None,
verbose: bool = False
) -> Tuple[pd.DataFrame, Dict[str, Union[RegressorMixin, ClassifierMixin]]]:
"""Create a machine learning model predicting values in the first column
    :param data: data containing the dependent variable (in the first column) and other features
:param model: estimator (may be classifier or regressor) to use for model building
:param folds: cross-validator
:param groups: groups to split the labels according to
:param verbose: whether to show fold progression
:return: cross-validated performance and model trained on the entire dataset
"""
X, y = data.iloc[:, 1:], data.iloc[:, 0].values.ravel()
performance = []
if verbose:
pbar = tqdm(desc='Fitting model', total=folds.n_splits + 1)
models = {}
# Perform cross-validation
for i, (train, test) in enumerate(folds.split(X, y, groups)):
if verbose:
pbar.set_description(f'Fitting model on fold {i + 1}', refresh=True)
model.fit(X.iloc[train, :], y[train])
models[f'Fold {i + 1}'] = deepcopy(model)
performance.append(model_metrics(model, y[test], X.iloc[test, :]))
if verbose:
pbar.update()
# Organize result in a dataframe
performance = pd.DataFrame(performance)
performance.index = [f'Fold {i + 1}' for i in range(folds.n_splits)]
# Add average and sd of performance
performance.loc['Mean'] = [np.mean(performance[col]) if ':' not in col else '-' for col in performance]
performance.loc['SD'] = [np.std(performance[col]) if ':' not in col else '-' for col in performance]
# Fit model on the entire dataset
if verbose:
pbar.set_description('Fitting model on entire training set', refresh=True)
model.fit(X, y)
models['Full model'] = deepcopy(model)
if verbose:
pbar.update()
return performance, models
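# Example (sketch): cross-validating a regressor with crossvalidate_model. The 5-fold KFold
# and XGBoost regressor mirror the defaults used in qsar() below, but the synthetic dataset
# is an illustrative assumption; the endpoint must sit in the first column.
def _example_crossvalidate_model():
    """Minimal usage sketch for crossvalidate_model (illustrative only)."""
    rng = np.random.default_rng(1234)
    features = pd.DataFrame(rng.normal(size=(120, 4)), columns=['f1', 'f2', 'f3', 'f4'])
    target = ((features ** 2).sum(axis=1) + 5.0).rename('pchembl_value_Mean')
    data = pd.concat([target, features], axis=1)  # first column is the endpoint
    folds = KFold(n_splits=5, shuffle=True, random_state=1234)
    performance, models = crossvalidate_model(data, xgboost.XGBRegressor(verbosity=0), folds)
    return performance.loc['Mean'], models['Full model']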
def train_test_proportional_group_split(data: pd.DataFrame,
groups: List[int],
test_size: float = 0.30,
verbose: bool = False
) -> Tuple[pd.DataFrame, pd.DataFrame, List[int], List[int]]:
"""Split the data into training and test sets according to the groups that respect most test_size
:param data: the data to be split up into training and test sets
:param groups: groups to split the data according to
:param test_size: approximate proportion of the input dataset to determine the test set
:param verbose: whether to log to stdout or not
:return: training and test sets and training and test groups
"""
counts = Counter(groups)
size = sum(counts.values())
# Get ordered permutations of groups without repetitions
permutations = list(chain.from_iterable(combinations(counts.keys(), r) for r in range(len(counts))))
# Get proportion of each permutation
proportions = [sum(counts[x] for x in p) / size for p in permutations]
# Get permutation minimizing difference to test_size
best, proportion = min(zip(permutations, proportions), key=lambda x: (x[1] - test_size) ** 2)
del counts, permutations, proportions
if verbose:
print(f'Best group permutation corresponds to {proportion:.2%} of the data')
# Get test set assignment
    assignment = np.array([group in best for group in groups])
opposite = np.logical_not(assignment)
# Get training groups
t_groups = [x for x in groups if x not in best]
return data[opposite], data[assignment], t_groups, best
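# Example (sketch): splitting a dataset by pre-computed group labels so that roughly 30% of
# the rows end up in the test set. The toy frame and cluster labels are illustrative.
def _example_proportional_group_split():
    """Minimal usage sketch for train_test_proportional_group_split (illustrative only)."""
    frame = pd.DataFrame({'value': range(10)})
    groups = [0, 0, 0, 1, 1, 1, 1, 2, 2, 2]  # e.g. cluster assignments, one per row
    train, test, train_groups, test_groups = train_test_proportional_group_split(
        frame, groups, test_size=0.30)
    return train.shape[0], test.shape[0], test_groups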
def qsar(data: pd.DataFrame,
endpoint: str = 'pchembl_value_Mean',
num_points: int = 30,
delta_activity: float = 2,
version: str = 'latest',
descriptors: str = 'mold2',
descriptor_path: Optional[str] = None,
descriptor_chunksize: Optional[int] = 50000,
activity_threshold: float = 6.5,
model: Union[RegressorMixin, ClassifierMixin] = xgboost.XGBRegressor(verbosity=0),
folds: int = 5,
stratify: bool = False,
split_by: str = 'Year',
split_year: int = 2013,
test_set_size: float = 0.30,
cluster_method: ClusterMixin = None,
custom_groups: pd.DataFrame = None,
scale: bool = False,
scale_method: TransformerMixin = StandardScaler(),
yscramble: bool = False,
random_state: int = 1234,
verbose: bool = True
) -> Tuple[pd.DataFrame,
Dict[str,
Optional[Union[TransformerMixin,
LabelEncoder,
BaseCrossValidator,
Dict[str,
Union[RegressorMixin,
ClassifierMixin]]]]]]:
"""Create QSAR models for as many targets with selected data source(s),
data quality, minimum number of datapoints and minimum activity amplitude.
:param data: Papyrus activity data
:param endpoint: value to be predicted or to derive classes from
:param num_points: minimum number of points for the activity of a target to be modelled
:param delta_activity: minimum difference between most and least active compounds for a target to be modelled
    :param descriptors: type of descriptors to be used for model training
    :param descriptor_path: path to Papyrus descriptors (default: pystow's default path)
    :param descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking)
    :param activity_threshold: threshold activity between active and inactive compounds (ignored if using a regressor)
:param model: machine learning model to be used for QSAR modelling
:param folds: number of cross-validation folds to be performed
:param stratify: whether to stratify folds for cross validation, ignored if model is RegressorMixin
:param split_by: how should folds be determined {'random', 'Year', 'cluster', 'custom'}
If 'random', exactly test_set_size is extracted for test set.
If 'Year', the size of the test and training set are not looked at
        If 'cluster' or 'custom', the groups giving proportion closest to test_set_size will be used to define the test set
:param split_year: Year from which on the test set is extracted (ignored if split_by is not 'Year')
:param test_set_size: proportion of the dataset to be used as test set
:param cluster_method: clustering method to use to extract test set and cross-validation folds (ignored if split_by is not 'cluster')
:param custom_groups: custom groups to use to extract test set and cross-validation fold (ignored if split_by is not 'custom').
Groups must be a pandas DataFrame with only two Series. The first Series is either InChIKey or connectivity
        (depending on whether stereochemistry data are being used or not). The second Series must be the group assignment
of each compound.
    :param scale: should the features be scaled using the custom scale_method
:param scale_method: scaling method to be applied to features (ignored if scale is False)
:param yscramble: should the endpoint be shuffled to compare performance to the unshuffled endpoint
:param random_state: seed to use for train/test splitting and KFold shuffling
:param verbose: log details to stdout
:return: both:
- a dataframe of the cross-validation results where each line is a fold of QSAR modelling of an accession
        - a dictionary of the feature scaler (if used), label encoder (if model is a classifier),
the data splitter for cross-validation, and for each accession in the data:
the fitted models on each cross-validation fold and the model fitted on the complete training set.
"""
if split_by.lower() not in ['year', 'random', 'cluster', 'custom']:
raise ValueError("split not supported, must be one of {'Year', 'random', 'cluster', 'custom'}")
if not isinstance(model, (RegressorMixin, ClassifierMixin)):
raise ValueError('model type can only be a Scikit-Learn compliant regressor or classifier')
warnings.filterwarnings("ignore", category=RuntimeWarning)
if isinstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("ignore", category=UserWarning)
model_type = 'regressor' if isinstance(model, RegressorMixin) else 'classifier'
# Keep only required fields
merge_on = 'connectivity' if 'connectivity' in data.columns else 'InChIKey'
if model_type == 'regressor':
features_to_ignore = [merge_on, 'target_id', endpoint, 'Year']
data = data[data['relation'] == '='][features_to_ignore]
else:
features_to_ignore = [merge_on, 'target_id', 'Activity_class', 'Year']
preserved = data[~data['Activity_class'].isna()]
preserved = preserved.drop(
columns=[col for col in preserved if col not in [merge_on, 'target_id', 'Activity_class', 'Year']])
active = data[data['Activity_class'].isna() & (data[endpoint] > activity_threshold)]
active = active[~active['relation'].str.contains('<')][features_to_ignore]
active.loc[:, 'Activity_class'] = 'A'
inactive = data[data['Activity_class'].isna() & (data[endpoint] <= activity_threshold)]
inactive = inactive[~inactive['relation'].str.contains('>')][features_to_ignore]
inactive.loc[:, 'Activity_class'] = 'N'
data = pd.concat([preserved, active, inactive])
# Change endpoint
endpoint = 'Activity_class'
del preserved, active, inactive
# Get and merge molecular descriptors
descs = read_molecular_descriptors(descriptors, 'connectivity' not in data.columns,
version, descriptor_chunksize, descriptor_path)
descs = filter_molecular_descriptors(descs, merge_on, data[merge_on].unique())
data = data.merge(descs, on=merge_on)
data = data.drop(columns=[merge_on])
del descs
# Table of results
results, models = [], {}
targets = list(data['target_id'].unique())
n_targets = len(targets)
if verbose:
pbar = tqdm(total=n_targets, smoothing=0.1)
# Build QSAR model for targets reaching criteria
for i_target in range(n_targets - 1, -1, -1):
tmp_data = data[data['target_id'] == targets[i_target]]
if verbose:
pbar.set_description(f'Building QSAR for target: {targets[i_target]} #datapoints {tmp_data.shape[0]}',
refresh=True)
# Insufficient data points
if tmp_data.shape[0] < num_points:
if model_type == 'regressor':
results.append(pd.DataFrame([[targets[i_target],
tmp_data.shape[0],
f'Number of points {tmp_data.shape[0]} < {num_points}']],
columns=['target', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
f'Number of points {tmp_data.shape[0]} < {num_points}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
if model_type == 'regressor':
min_activity = tmp_data[endpoint].min()
max_activity = tmp_data[endpoint].max()
delta = max_activity - min_activity
# Not enough activity amplitude
if delta < delta_activity:
results.append(pd.DataFrame([[targets[i_target],
tmp_data.shape[0],
f'Delta activity {delta} < {delta_activity}']],
columns=['target', 'number', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
else:
data_classes = Counter(tmp_data[endpoint])
# Only one activity class
if len(data_classes) == 1:
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
'Only one activity class']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
# Not enough data in minority class for all folds
elif not all(x >= folds for x in data_classes.values()):
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
f'Not enough data in minority class for all {folds} folds']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
# Set groups for fold enumerator and extract test set
if split_by.lower() == 'year':
groups = tmp_data['Year']
test_set = tmp_data[tmp_data['Year'] >= split_year]
if test_set.empty:
if model_type == 'regressor':
results.append(pd.DataFrame([[targets[i_target],
tmp_data.shape[0],
f'No test data for temporal split at {split_year}']],
columns=['target', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
f'No test data for temporal split at {split_year}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
training_set = tmp_data[~tmp_data.index.isin(test_set.index)]
if training_set.empty or training_set.shape[0] < folds:
if model_type == 'regressor':
results.append(pd.DataFrame([[targets[i_target],
tmp_data.shape[0],
f'Not enough training data for temporal split at {split_year}']],
columns=['target', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
f'Not enough training data for temporal split at {split_year}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
if model_type == 'classifier':
train_data_classes = Counter(training_set[endpoint])
test_data_classes = Counter(test_set[endpoint])
if len(train_data_classes) < 2:
results.append(pd.DataFrame([[targets[i_target],
':'.join(str(train_data_classes.get(x, 0)) for x in ['A', 'N']),
                                                  f'Only one activity class in training set for temporal split at {split_year}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
                    models[targets[i_target]] = None
                    continue
elif len(test_data_classes) < 2:
results.append(pd.DataFrame([[targets[i_target],
':'.join(str(test_data_classes.get(x, 0)) for x in ['A', 'N']),
                                                  f'Only one activity class in test set for temporal split at {split_year}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
training_groups = training_set['Year']
elif split_by.lower() == 'random':
training_groups = None
training_set, test_set = train_test_split(tmp_data, test_size=test_set_size, random_state=random_state)
elif split_by.lower() == 'cluster':
groups = cluster_method.fit_predict(tmp_data.drop(columns=features_to_ignore))
training_set, test_set, training_groups, _ = train_test_proportional_group_split(tmp_data, groups,
test_set_size,
verbose=verbose)
elif split_by.lower() == 'custom':
# Merge from custom split DataFrame
groups = tmp_data[[merge_on]].merge(custom_groups, on=merge_on).iloc[:, 1].tolist()
training_set, test_set, training_groups, _ = train_test_proportional_group_split(tmp_data, groups,
test_set_size,
verbose=verbose)
# Drop columns not used for training
training_set = training_set.drop(columns=['Year', 'target_id'])
test_set = test_set.drop(columns=['Year', 'target_id'])
X_train, y_train = training_set.drop(columns=[endpoint]), training_set.loc[:, endpoint]
X_test, y_test = test_set.drop(columns=[endpoint]), test_set.loc[:, endpoint]
# Scale data
if scale:
X_train.loc[X_train.index, X_train.columns] = scale_method.fit_transform(X_train)
X_test.loc[X_test.index, X_test.columns] = scale_method.transform(X_test)
# Encode labels
if model_type == 'classifier':
lblenc = LabelEncoder()
y_train = pd.Series(data=lblenc.fit_transform(y_train),
index=y_train.index, dtype=y_train.dtype,
name=y_train.name)
y_test = pd.Series(data=lblenc.transform(y_test),
index=y_test.index, dtype=y_test.dtype,
name=y_test.name)
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
# Reorganize data
training_set = pd.concat([y_train, X_train], axis=1)
test_set = pd.concat([y_test, X_test], axis=1)
del X_train, y_train, X_test, y_test
# Y-scrambling
if yscramble:
training_set = yscrambling(data=training_set, y_var=endpoint, random_state=random_state)
test_set = yscrambling(data=test_set, y_var=endpoint, random_state=random_state)
# Make sure enough data
if model_type == 'classifier':
train_data_classes = Counter(training_set['Activity_class'])
train_enough_data = np.all(np.array(list(train_data_classes.values())) > folds)
test_data_classes = Counter(test_set['Activity_class'])
test_enough_data = np.all(np.array(list(test_data_classes.values())) > folds)
if not train_enough_data:
results.append(pd.DataFrame([[targets[i_target],
':'.join(str(train_data_classes.get(x, 0)) for x in ['A', 'N']),
f'Not enough data in minority class of the training set for all {folds} folds']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
elif not test_enough_data:
results.append(pd.DataFrame([[targets[i_target],
':'.join(str(test_data_classes.get(x, 0)) for x in ['A', 'N']),
                                              f'Not enough data in minority class of the test set for all {folds} folds']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
# Define folding scheme for cross validation
if stratify and model_type == 'classifier':
kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=random_state)
else:
kfold = KFold(n_splits=folds, shuffle=True, random_state=random_state)
performance, cv_models = crossvalidate_model(training_set, model, kfold, training_groups)
full_model = cv_models['Full model']
X_test, y_test = test_set.iloc[:, 1:], test_set.iloc[:, 0].values.ravel()
performance.loc['Test set'] = model_metrics(full_model, y_test, X_test)
performance.loc[:, 'target'] = targets[i_target]
results.append(performance.reset_index())
models[targets[i_target]] = cv_models
if verbose:
pbar.update()
if isinstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("default", category=UserWarning)
warnings.filterwarnings("default", category=RuntimeWarning)
# Formatting return values
return_val = {}
if scale:
return_val['scaler'] = deepcopy(scale_method)
if model_type == 'classifier':
return_val['label_encoder'] = deepcopy(lblenc)
if stratify:
return_val['data_splitter'] = StratifiedKFold(n_splits=folds, shuffle=True, random_state=random_state)
else:
return_val['data_splitter'] = KFold(n_splits=folds, shuffle=True, random_state=random_state)
return_val = {**return_val, **models}
    if len(results) == 0:
return pd.DataFrame(), return_val
results = pd.concat(results, axis=0).set_index(['target', 'index'])
results.index.names = ['target', None]
return results, return_val
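# Example (sketch): a typical qsar() call on a pre-filtered Papyrus activity table. The
# `papyrus_data` argument and the 'mold2' descriptor choice are assumptions for
# illustration; in practice the table comes from the Papyrus reader utilities.
def _example_qsar(papyrus_data: pd.DataFrame) -> pd.DataFrame:
    """Minimal usage sketch for qsar() (illustrative only)."""
    results, artifacts = qsar(papyrus_data,
                              endpoint='pchembl_value_Mean',
                              descriptors='mold2',
                              model=xgboost.XGBRegressor(verbosity=0),
                              folds=5,
                              split_by='Year',
                              split_year=2013,
                              verbose=False)
    # `artifacts` holds the cross-validation splitter and, per target, the fitted models.
    return results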
def pcm(data: pd.DataFrame,
endpoint: str = 'pchembl_value_Mean',
num_points: int = 30,
delta_activity: float = 2,
version: str = 'latest',
mol_descriptors: str = 'mold2',
mol_descriptor_path: Optional[str] = None,
mol_descriptor_chunksize: Optional[int] = 50000,
prot_sequences_path: str = './',
prot_descriptors: Union[str, Descriptor, Transform] = 'unirep',
prot_descriptor_path: Optional[str] = None,
prot_descriptor_chunksize: Optional[int] = 50000,
activity_threshold: float = 6.5,
model: Union[RegressorMixin, ClassifierMixin] = xgboost.XGBRegressor(verbosity=0),
folds: int = 5,
stratify: bool = False,
split_by: str = 'Year',
split_year: int = 2013,
test_set_size: float = 0.30,
cluster_method: ClusterMixin = None,
custom_groups: pd.DataFrame = None,
scale: bool = False,
scale_method: TransformerMixin = StandardScaler(),
yscramble: bool = False,
random_state: int = 1234,
verbose: bool = True
) -> Tuple[pd.DataFrame,
Dict[str,
Union[TransformerMixin,
LabelEncoder,
BaseCrossValidator,
RegressorMixin,
ClassifierMixin]]]:
"""Create PCM models for as many targets with selected data source(s),
data quality, minimum number of datapoints and minimum activity amplitude.
:param data: Papyrus activity data
:param endpoint: value to be predicted or to derive classes from
:param num_points: minimum number of points for the activity of a target to be modelled
:param delta_activity: minimum difference between most and least active compounds for a target to be modelled
    :param mol_descriptors: type of descriptors to be used for model training
    :param mol_descriptor_path: path to Papyrus descriptors
    :param mol_descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking)
    :param prot_sequences_path: path to Papyrus sequences
    :param prot_descriptors: type of descriptors to be used for model training
    :param prot_descriptor_path: path to Papyrus descriptors
    :param prot_descriptor_chunksize: chunk size of protein descriptors to be iteratively loaded (None disables chunking)
    :param activity_threshold: threshold activity between active and inactive compounds (ignored if using a regressor)
:param model: machine learning model to be used for PCM modelling
:param folds: number of cross-validation folds to be performed
:param stratify: whether to stratify folds for cross validation, ignored if model is RegressorMixin
:param split_by: how should folds be determined {'random', 'Year', 'cluster', 'custom'}
If 'random', exactly test_set_size is extracted for test set.
If 'Year', the size of the test and training set are not looked at
        If 'cluster' or 'custom', the groups giving proportion closest to test_set_size will be used to define the test set
:param split_year: Year from which on the test set is extracted (ignored if split_by is not 'Year')
:param test_set_size: proportion of the dataset to be used as test set
:param cluster_method: clustering method to use to extract test set and cross-validation folds (ignored if split_by is not 'cluster')
:param custom_groups: custom groups to use to extract test set and cross-validation fold (ignored if split_by is not 'custom').
Groups must be a pandas DataFrame with only two Series. The first Series is either InChIKey or connectivity
        (depending on whether stereochemistry data are being used or not). The second Series must be the group assignment
of each compound.
    :param scale: should the features be scaled using the custom scale_method
:param scale_method: scaling method to be applied to features (ignored if scale is False)
:param yscramble: should the endpoint be shuffled to compare performance to the unshuffled endpoint
:param random_state: seed to use for train/test splitting and KFold shuffling
:param verbose: log details to stdout
:return: both:
- a dataframe of the cross-validation results where each line is a fold of PCM modelling
        - a dictionary of the feature scaler (if used), label encoder (if model is a classifier),
the data splitter for cross-validation, fitted models on each cross-validation fold,
the model fitted on the complete training set.
"""
if split_by.lower() not in ['year', 'random', 'cluster', 'custom']:
raise ValueError("split not supported, must be one of {'Year', 'random', 'cluster', 'custom'}")
if not isinstance(model, (RegressorMixin, ClassifierMixin)):
raise ValueError('model type can only be a Scikit-Learn compliant regressor or classifier')
warnings.filterwarnings("ignore", category=RuntimeWarning)
if isinstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("ignore", category=UserWarning)
model_type = 'regressor' if isinstance(model, RegressorMixin) else 'classifier'
# Keep only required fields
merge_on = 'connectivity' if 'connectivity' in data.columns else 'InChIKey'
if model_type == 'regressor':
features_to_ignore = [merge_on, 'target_id', endpoint, 'Year']
data = data[data['relation'] == '='][features_to_ignore]
else:
features_to_ignore = [merge_on, 'target_id', 'Activity_class', 'Year']
preserved = data[~data['Activity_class'].isna()]
preserved = preserved.drop(
columns=[col for col in preserved if col not in [merge_on, 'target_id', 'Activity_class', 'Year']])
active = data[data['Activity_class'].isna() & (data[endpoint] > activity_threshold)]
active = active[~active['relation'].str.contains('<')][features_to_ignore]
active.loc[:, 'Activity_class'] = 'A'
inactive = data[data['Activity_class'].isna() & (data[endpoint] <= activity_threshold)]
inactive = inactive[~inactive['relation'].str.contains('>')][features_to_ignore]
inactive.loc[:, 'Activity_class'] = 'N'
data = | pd.concat([preserved, active, inactive]) | pandas.concat |
# -*- coding: utf-8 -*-
"""Tracker REITs (Real Estate Investment Trust) investments."""
import datetime
import os
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import pandas as pd
import requests
import requests_cache
from .portutils import NUM_PROCESS, Singleton, UnitsTransactions
# Cache for dividends
CACHE_EXPIRE_DAYS = 15
requests_cache.install_cache(
cache_name="fiidiv",
backend="sqlite",
expire_after=datetime.timedelta(days=CACHE_EXPIRE_DAYS),
)
# Default fii portfolio csv file
# It is used if transaction file is not set
# on environment variable FII_TRANSACTIONS
CSV_FILE = "example_transactions/fii_transactions.csv"
class FiiDividends:
"""Class to handle dividends."""
URL = "https://mfinance.com.br/api/v1/fiis/dividends"
def __init__(self):
"""Initialize fii dividends class."""
self.dividends = {}
def load_dividends(self, ticker):
"""
Download all dividends paid out for a ticker.
Parameters:
ticker (str): FII ticker
"""
ticker_url = "{}/{}".format(self.URL, ticker)
print("Getting dividends: ", ticker_url)
# To not use request cache to get "current" price,
# comment next line and uncomment the other
df_tmp = pd.read_json(requests.get(ticker_url).content)
# df_tmp = pd.read_json(ticker_url)
df_dividends = | pd.json_normalize(df_tmp.dividends) | pandas.json_normalize |
from abc import abstractmethod, ABC
from pandas import DataFrame
from database.db_reader import read_table_header
class DataSource(ABC):
@staticmethod
def convert_header(table_name, dataframe):
columns = read_table_header(table_name)
dataframe.columns = columns
return dataframe
@abstractmethod
def get_stock_list(self, *args) -> DataFrame:
return | DataFrame() | pandas.DataFrame |
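# Example (sketch): a concrete data source is expected to subclass DataSource and implement
# get_stock_list; the table name and fetch helper below are hypothetical.
#   class MySource(DataSource):
#       def get_stock_list(self) -> DataFrame:
#           raw = fetch_stock_list_from_api()  # hypothetical helper
#           return self.convert_header('stock_list', raw)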
from PyDSS.pyContrReader import pySubscriptionReader as pySR
from PyDSS.pyLogger import getLoggerTag
from PyDSS.unitDefinations import type_info as Types
from PyDSS.unitDefinations import unit_info as Units
from PyDSS.pyContrReader import pyExportReader as pyER
from PyDSS import unitDefinations
from PyDSS.exceptions import InvalidParameter
from PyDSS.utils.dataframe_utils import write_dataframe
import pandas as pd
import numpy as np
#import helics as h
import pathlib
import gzip
import logging
import shutil
import math
import os
class ResultContainer:
def __init__(self, Options, SystemPaths, dssObjects, dssObjectsByClass, dssBuses, dssSolver, dssCommand):
if Options["Logging"]["Pre-configured logging"]:
LoggerTag = __name__
else:
LoggerTag = getLoggerTag(Options)
self.metadata_info = unitDefinations.unit_info
self.__dssDolver = dssSolver
self.Results = {}
self.CurrentResults = {}
self.pyLogger = logging.getLogger(LoggerTag)
self.Buses = dssBuses
self.ObjectsByElement = dssObjects
self.ObjectsByClass = dssObjectsByClass
self.SystemPaths = SystemPaths
self.__dssCommand = dssCommand
self.__Settings = Options
self.__StartDay = Options['Project']['Start Day']
self.__EndDay = Options['Project']['End Day']
self.__DateTime = []
self.__Frequency = []
self.__SimulationMode = []
self.__ExportFormat = Options['Exports']['Export Format']
self.__ExportCompression = Options['Exports']['Export Compression']
self.__publications = {}
self.__subscriptions = {}
self.ExportFolder = os.path.join(self.SystemPaths['Export'], Options['Project']['Active Scenario'])
pathlib.Path(self.ExportFolder).mkdir(parents=True, exist_ok=True)
if self.__Settings['Exports']['Export Mode'] == 'byElement':
self.FileReader = pyER(os.path.join(SystemPaths['ExportLists'], 'ExportMode-byElement.toml'))
self.ExportList = self.FileReader.pyControllers
self.PublicationList = self.FileReader.publicationList
self.CreateListByElement()
elif self.__Settings['Exports']['Export Mode'] == 'byClass':
self.FileReader = pyER(os.path.join(SystemPaths['ExportLists'], 'ExportMode-byClass.toml'))
self.ExportList = self.FileReader.pyControllers
self.PublicationList = self.FileReader.publicationList
self.CreateListByClass()
if self.__Settings['Helics']['Co-simulation Mode']:
self.__createPyDSSfederate()
self.__registerFederatePublications()
self.__registerFederateSubscriptions()
h.helicsFederateEnterExecutingMode(self.__PyDSSfederate)
self.pyLogger.debug('Entered HELICS execution mode')
return
def __createPyDSSfederate(self):
fedinfo = h.helicsCreateFederateInfo()
h.helicsFederateInfoSetCoreName(fedinfo, self.__Settings['Helics']['Federate name'])
h.helicsFederateInfoSetCoreTypeFromString(fedinfo, self.__Settings['Helics']['Core type'])
h.helicsFederateInfoSetCoreInitString(fedinfo, "--federates=1")
h.helicsFederateInfoSetTimeProperty(fedinfo, h.helics_property_time_delta, self.__Settings['Helics']['Time delta'])
h.helicsFederateInfoSetIntegerProperty(fedinfo, h.helics_property_int_log_level,
self.__Settings['Helics']['Helics logging level'])
h.helicsFederateInfoSetFlagOption(fedinfo, h.helics_flag_uninterruptible, True)
self.__PyDSSfederate = h.helicsCreateValueFederate(self.__Settings['Helics']['Federate name'], fedinfo)
return
def __registerFederateSubscriptions(self):
self.FileReader = pySR(os.path.join(self.SystemPaths['ExportLists'], 'Helics-Subcriptions.xlsx'))
self.__subscriptions = self.FileReader.SubscriptionDict
for element, subscription in self.__subscriptions.items():
assert element in self.ObjectsByElement, '"{}" listed in the subscription file not '.format(element) +\
"available in PyDSS's master object dictionary."
if subscription["Subscribe"] == True:
sub = h.helicsFederateRegisterSubscription(self.__PyDSSfederate, subscription["Subscription ID"],
subscription["Unit"])
self.pyLogger.debug('PyDSS subscribing to "{}" of with units "{}"'.format(
subscription["Subscription ID"],
subscription["Unit"])
)
subscription['Subscription'] = sub
self.__subscriptions[element] = subscription
return
def updateSubscriptions(self):
for element, subscriptionData in self.__subscriptions.items():
if 'Subscription' in subscriptionData:
if subscriptionData['Data type'].lower() == 'double':
value = h.helicsInputGetDouble(subscriptionData['Subscription'])
elif subscriptionData['Data type'].lower() == 'vector':
value = h.helicsInputGetVector(subscriptionData['Subscription'])
elif subscriptionData['Data type'].lower() == 'string':
value = h.helicsInputGetString(subscriptionData['Subscription'])
elif subscriptionData['Data type'].lower() == 'boolean':
value = h.helicsInputGetBoolean(subscriptionData['Subscription'])
elif subscriptionData['Data type'].lower() == 'integer':
value = h.helicsInputGetInteger(subscriptionData['Subscription'])
dssElement = self.ObjectsByElement[element]
dssElement.SetParameter(subscriptionData['Property'], value)
self.pyLogger.debug('Value for "{}.{}" changed to "{}"'.format(
element,
subscriptionData['Property'],
value
))
return
def __registerFederatePublications(self):
self.__publications = {}
for object, property_dict in self.CurrentResults.items():
objClass = None
for Class in self.ObjectsByClass:
if object in self.ObjectsByClass[Class]:
objClass = Class
break
for property, type_dict in property_dict.items():
if '{} {}'.format(objClass, property) in self.PublicationList:
for typeID, type in type_dict.items():
name = '{}.{}.{}'.format(object, property, typeID)
self.__publications[name] = h.helicsFederateRegisterGlobalTypePublication(
self.__PyDSSfederate,
name,
type['type'],
type['unit']
)
return
def __initCurrentResults(self, PptyName):
data = {}
if PptyName in Units:
if isinstance(Units[PptyName], dict):
for subset, unit in Units[PptyName].items():
data[subset] = {
'value': None,
'unit': Units[PptyName][subset],
'type': Types[PptyName]
}
else:
data['A'] = {
'value': None,
'unit': Units[PptyName],
'type': Types[PptyName]
}
else:
data['A'] = {
'value': None,
'unit': 'NA',
'type': 'double'
}
return data
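    # Shape of the structure returned above (illustrative): a property with even/odd subsets
    # maps each subset to its own entry, roughly
    #   {'E': {'value': None, 'unit': '<even unit>', 'type': 'double'},
    #    'O': {'value': None, 'unit': '<odd unit>', 'type': 'double'}}
    # while properties without subsets fall back to the single key 'A'.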
def CreateListByClass(self):
for Class, Properties in self.ExportList.items():
if Class == 'Buses':
self.Results[Class] = {}
for PptyIndex, PptyName in enumerate(Properties):
if isinstance(PptyName, str):
self.Results[Class][PptyName] = {}
for BusName, BusObj in self.Buses.items():
if self.Buses[BusName].inVariableDict(PptyName):
self.Results[Class][PptyName][BusName] = []
if BusName not in self.CurrentResults:
self.CurrentResults[BusName] = {}
self.CurrentResults[BusName][PptyName] = self.__initCurrentResults(PptyName)
else:
if Class in self.ObjectsByClass:
self.Results[Class] = {}
for PptyIndex, PptyName in enumerate(Properties):
if isinstance(PptyName, str):
self.Results[Class][PptyName] = {}
for ElementName, ElmObj in self.ObjectsByClass[Class].items():
if self.ObjectsByClass[Class][ElementName].IsValidAttribute(PptyName):
self.Results[Class][PptyName][ElementName] = []
if ElementName not in self.CurrentResults:
self.CurrentResults[ElementName] = {}
self.CurrentResults[ElementName][PptyName] = self.__initCurrentResults(PptyName)
return
def CreateListByElement(self):
for Element, Properties in self.ExportList.items():
if Element in self.ObjectsByElement:
self.Results[Element] = {}
self.CurrentResults[Element] = {}
for PptyIndex, PptyName in enumerate(Properties):
if isinstance(PptyName, str):
if self.ObjectsByElement[Element].IsValidAttribute(PptyName):
self.Results[Element][PptyName] = []
self.CurrentResults[Element][PptyName] = self.__initCurrentResults(PptyName)
elif Element in self.Buses:
self.Results[Element] = {}
self.CurrentResults[Element] = {}
for PptyIndex, PptyName in enumerate(Properties):
if isinstance(PptyName, str):
if self.Buses[Element].inVariableDict(PptyName):
self.Results[Element][PptyName] = []
self.CurrentResults[Element][PptyName] = self.__initCurrentResults(PptyName)
return
def __parse_current_values(self, Element, Property, Values):
ans = self.CurrentResults[Element][Property]
for filter, data in ans.items():
if filter == 'A':
ans[filter]['value'] = Values
elif filter == 'E':
ans[filter]['value'] = Values[0::2]
elif filter == '0':
ans[filter]['value'] = Values[1::2]
if self.__Settings['Helics']['Co-simulation Mode']:
name = '{}.{}.{}'.format(Element, Property, filter)
if isinstance(ans[filter]['value'], list) and name in self.__publications:
h.helicsPublicationPublishVector(self.__publications[name], ans[filter]['value'])
elif isinstance(Values, float) and name in self.__publications:
h.helicsPublicationPublishDouble(self.__publications[name], ans[filter]['value'])
elif isinstance(Values, str) and name in self.__publications:
h.helicsPublicationPublishString(self.__publications[name], ans[filter]['value'])
elif isinstance(Values, bool) and name in self.__publications:
h.helicsPublicationPublishBoolean(self.__publications[name], ans[filter]['value'])
elif isinstance(Values, int) and name in self.__publications:
h.helicsPublicationPublishInteger(self.__publications[name], ans[filter]['value'])
self.CurrentResults[Element][Property] = ans
return
def InitializeDataStore(self, _, __):
pass
def UpdateResults(self):
if self.__Settings['Helics']['Co-simulation Mode']:
r_seconds = self.__dssDolver.GetTotalSeconds()
print('Time: ', r_seconds)
c_seconds = 0
while c_seconds < r_seconds:
c_seconds = h.helicsFederateRequestTime(self.__PyDSSfederate, r_seconds)
self.__DateTime.append(self.__dssDolver.GetDateTime())
self.__Frequency.append(self.__dssDolver.getFrequency())
self.__SimulationMode.append(self.__dssDolver.getMode())
if self.__Settings['Exports']['Export Mode'] == 'byElement':
for Element in self.Results.keys():
for Property in self.Results[Element].keys():
if '.' in Element:
value = self.ObjectsByElement[Element].GetValue(Property)
self.Results[Element][Property].append(value)
self.__parse_current_values(Element, Property, value)
else:
value = self.Buses[Element].GetVariable(Property)
self.Results[Element][Property].append(value)
self.__parse_current_values(Element, Property, value)
elif self.__Settings['Exports']['Export Mode'] == 'byClass':
for Class in self.Results.keys():
for Property in self.Results[Class].keys():
for Element in self.Results[Class][Property].keys():
if Class == 'Buses':
value = self.Buses[Element].GetVariable(Property)
self.Results[Class][Property][Element].append(value)
self.__parse_current_values(Element, Property, value)
else:
value = self.ObjectsByClass[Class][Element].GetValue(Property)
self.Results[Class][Property][Element].append(value)
self.__parse_current_values(Element, Property, value)
return
def ExportResults(self, fileprefix=''):
if self.__Settings['Exports']['Export Mode'] == 'byElement':
self.__ExportResultsByElements(fileprefix)
elif self.__Settings['Exports']['Export Mode'] == 'byClass':
self.__ExportResultsByClass(fileprefix)
self.__ExportEventLog()
def FlushData(self):
pass
def max_num_bytes(self):
return 0
def __ExportResultsByClass(self, fileprefix=''):
for Class in self.Results.keys():
for Property in self.Results[Class].keys():
Class_ElementDatasets = []
PptyLvlHeader = ''
for Element in self.Results[Class][Property].keys():
ElmLvlHeader = ''
if isinstance(self.Results[Class][Property][Element][0], list):
Data = np.array(self.Results[Class][Property][Element])
for i in range(len(self.Results[Class][Property][Element][0])):
if Property in self.metadata_info:
if i % 2 == 0 and 'E' in self.metadata_info[Property]:
ElmLvlHeader += '{} ph:{} [{}],'.format(Element, math.floor(i / 2) + 1,
self.metadata_info[Property]['E'])
elif i % 2 == 1 and 'O' in self.metadata_info[Property]:
ElmLvlHeader += '{} ph:{} [{}],'.format(Element, math.floor(i / 2) + 1,
self.metadata_info[Property]['O'])
else:
ElmLvlHeader += '{}-{} [{}],'.format(Element, i, self.metadata_info[Property])
else:
ElmLvlHeader += Element + '-' + str(i) + ','
else:
Data = np.transpose(np.array([self.Results[Class][Property][Element]]))
if Property in self.metadata_info:
ElmLvlHeader = '{} [{}],'.format(Element, self.metadata_info[Property])
else:
ElmLvlHeader = Element + ','
if self.__Settings['Exports']['Export Style'] == 'Separate files':
                        fname = '-'.join([Class, Property, Element, str(self.__StartDay), str(self.__EndDay), fileprefix])
columns = [x for x in ElmLvlHeader.split(',') if x != '']
tuples = list(zip(*[self.__DateTime, self.__Frequency, self.__SimulationMode]))
index = pd.MultiIndex.from_tuples(tuples, names=['timestamp', 'frequency', 'Simulation mode'])
df = pd.DataFrame(Data, index=index, columns=columns)
if self.__ExportFormat == "h5":
df.reset_index(inplace=True)
self.__ExportDataFrame(df, os.path.join(self.ExportFolder, fname))
elif self.__Settings['Exports']['Export Style'] == 'Single file':
Class_ElementDatasets.append(Data)
PptyLvlHeader += ElmLvlHeader
if self.__Settings['Exports']['Export Style'] == 'Single file':
assert Class_ElementDatasets
Dataset = Class_ElementDatasets[0]
if len(Class_ElementDatasets) > 1:
for D in Class_ElementDatasets[1:]:
Dataset = np.append(Dataset, D, axis=1)
columns = [x for x in PptyLvlHeader.split(',') if x != '']
tuples = list(zip(*[self.__DateTime, self.__Frequency, self.__SimulationMode]))
index = pd.MultiIndex.from_tuples(tuples, names=['timestamp', 'frequency', 'Simulation mode'])
df = pd.DataFrame(Dataset, index=index, columns=columns)
if self.__ExportFormat == "h5":
df.reset_index(inplace=True)
fname = '-'.join([Class, Property, str(self.__StartDay), str(self.__EndDay), fileprefix])
self.__ExportDataFrame(df, os.path.join(self.ExportFolder, fname))
return
def __ExportResultsByElements(self, fileprefix=''):
for Element in self.Results.keys():
ElementDatasets = []
AllHeader = ''
for Property in self.Results[Element].keys():
Header = ''
if isinstance(self.Results[Element][Property][0], list):
Data = np.array(self.Results[Element][Property])
for i in range(len(self.Results[Element][Property][0])):
if Property in self.metadata_info:
if i % 2 == 0 and 'E' in self.metadata_info[Property]:
Header += '{} ph:{} [{}],'.format(Property, math.floor(i / 2) + 1,
self.metadata_info[Property]['E'])
elif i % 2 == 1 and 'O' in self.metadata_info[Property]:
Header += '{} ph:{} [{}],'.format(Property, math.floor(i / 2) + 1,
self.metadata_info[Property]['O'])
else:
Header += '{}-{} [{}],'.format(Property, i, self.metadata_info[Property])
else:
Header += Property + '-' + str(i) + ','
else:
Data = np.transpose(np.array([self.Results[Element][Property]]))
Header = Property + ','
if self.__Settings['Exports']['Export Style'] == 'Separate files':
fname = '-'.join([Element, Property, str(self.__StartDay), str(self.__EndDay), fileprefix])
columns = [x for x in Header.split(',') if x != '']
tuples = list(zip(*[self.__DateTime, self.__Frequency, self.__SimulationMode]))
index = | pd.MultiIndex.from_tuples(tuples, names=['timestamp', 'frequency', 'Simulation mode']) | pandas.MultiIndex.from_tuples |
"""ops.syncretism.io model"""
__docformat__ = "numpy"
import configparser
import logging
from typing import Tuple
import pandas as pd
import requests
import yfinance as yf
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.options import yfinance_model
logger = logging.getLogger(__name__)
accepted_orders = [
"e_desc",
"e_asc",
"iv_desc",
"iv_asc",
"md_desc",
"md_asc",
"lp_desc",
"lp_asc",
"oi_asc",
"oi_desc",
"v_desc",
"v_asc",
]
@log_start_end(log=logger)
def get_historical_greeks(
ticker: str, expiry: str, chain_id: str, strike: float, put: bool
) -> pd.DataFrame:
"""Get histoical option greeks
Parameters
----------
ticker: str
Stock ticker
expiry: str
Option expiration date
chain_id: str
OCC option symbol. Overwrites other inputs
strike: float
Strike price to look for
put: bool
Is this a put option?
Returns
-------
df: pd.DataFrame
Dataframe containing historical greeks
"""
if not chain_id:
options = yfinance_model.get_option_chain(ticker, expiry)
if put:
options = options.puts
else:
options = options.calls
chain_id = options.loc[options.strike == strike, "contractSymbol"].values[0]
r = requests.get(f"https://api.syncretism.io/ops/historical/{chain_id}")
if r.status_code != 200:
console.print("Error in request.")
return pd.DataFrame()
history = r.json()
iv, delta, gamma, theta, rho, vega, premium, price, time = (
[],
[],
[],
[],
[],
[],
[],
[],
[],
)
for entry in history:
time.append( | pd.to_datetime(entry["timestamp"], unit="s") | pandas.to_datetime |
import os
import sys
import numpy as np
import pandas as pd
from pycompss.api.api import compss_wait_on
from pycompss.api.task import task
from data_managers.fundamentals_extraction import FundamentalsCollector
from data_managers.price_extraction import PriceExtractor
from data_managers.sic import load_sic
from models.classifiers import train_attrs as attrs
from settings.basic import DATE_FORMAT, DATA_PATH
from utils import load_symbol_list, save_obj, exists_obj, get_datasets_name
try:
import pyextrae.multiprocessing as pyextrae
tracing = True
except:
tracing = False
@task(returns=pd.DataFrame)
def get_prices(symbols_list_name, start_date='2006-01-01',
resample_period='1W', only_prices=False):
if tracing:
pro_f = sys.getprofile()
sys.setprofile(None)
prices = _get_prices(symbols_list_name, start_date, resample_period)
if only_prices:
res = prices.price
else:
res = prices
if tracing:
sys.setprofile(pro_f)
return res
def _get_prices(symbols_list_name, start_date='2006-01-01',
resample_period='1W'):
print("Loading prices for %s [%s - end] %s" % (
symbols_list_name, start_date, resample_period))
df_prices = PriceExtractor(symbols_list_name=symbols_list_name,
start_date=start_date).collect()
# set common index for outer join
df_prices = (df_prices
.assign(
date=lambda r: pd.to_datetime(r.date, format=DATE_FORMAT))
.set_index('date')
.groupby('symbol')
.resample(resample_period)
.ffill()
.sort_index())
return df_prices
@task(returns=pd.DataFrame)
def get_fundamentals(symbols_list_name, start_date, end_date, resample_period):
if tracing:
pro_f = sys.getprofile()
sys.setprofile(None)
print("Loading fundamentals for %s [%s - %s] %s" % (
symbols_list_name, start_date, end_date, resample_period))
df_fund = FundamentalsCollector(symbols_list_name=symbols_list_name,
start_date=start_date,
end_date=end_date).collect()
df_fund = (df_fund
.drop_duplicates(['date', 'symbol'], keep='first')
.assign(date=lambda r: pd.to_datetime(r.date, format=DATE_FORMAT))
.set_index('date')
.groupby('symbol')
.resample(resample_period)
.ffill()
.replace('nm', np.NaN)
.sort_index()
.assign(
bookvaluepershare=lambda r: pd.to_numeric(r.bookvaluepershare)))
df_fund = pd.concat(
[pd.to_numeric(df_fund[col], errors='ignore') for col in
df_fund.columns],
axis=1)
if tracing:
sys.setprofile(pro_f)
return df_fund
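# Example (sketch): how the two tasks above are usually launched and then synchronised when
# the PyCOMPSs runtime is active. The symbol-list name 'sp500' and the dates are illustrative.
def _example_collect_data():
    """Minimal usage sketch combining get_prices and get_fundamentals (illustrative only)."""
    prices_future = get_prices('sp500', start_date='2006-01-01', resample_period='1W')
    fund_future = get_fundamentals('sp500', start_date='2006-01-01',
                                   end_date='2018-01-01', resample_period='1W')
    df_prices = compss_wait_on(prices_future)
    df_fund = compss_wait_on(fund_future)
    return df_prices, df_fund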
def process_symbol(symbol, df_fund, df_prices, sic_code, sic_industry,
thresholds, target_shift):
# TODO remove this once pyCOMPSs supports single-char parameters
symbol = symbol[:-1]
bot_thresh, top_thresh = thresholds
print("Processing symbol [%s]" % symbol)
ds = pd.concat([df_fund.loc[symbol], df_prices.loc[symbol]],
join='inner',
axis=1)
bins = pd.IntervalIndex.from_tuples(
[(-np.inf, bot_thresh), (bot_thresh, top_thresh),
(top_thresh, np.inf)])
df_tidy = (pd.DataFrame()
.assign(eps=ds.basiceps,
price=ds.price,
p2b=ds.price / ds.bookvaluepershare,
p2e=ds.price / ds.basiceps,
p2r=ds.price / ds.totalrevenue,
div2price=pd.to_numeric(
ds.cashdividendspershare) / pd.to_numeric(
ds.price),
divpayoutratio=ds.divpayoutratio,
# Performance measures
roe=ds.roe,
roic=ds.roic,
roa=ds.roa,
# Efficiency measures
assetturnover=ds.assetturnover,
invturnonver=ds.invturnover,
profitmargin=ds.profitmargin,
debtratio=ds.totalassets / ds.totalliabilities,
ebittointerestex=pd.to_numeric(
ds.ebit) / pd.to_numeric(
ds.totalinterestexpense),
# aka times-interest-earned ratio
# cashcoverage=ds.ebit + depretitation) / ds.totalinterestexpense,
# Liquidity measures
wc=ds.nwc,
wc2a=pd.to_numeric(ds.nwc) / pd.to_numeric(
ds.totalassets),
currentratio=ds.totalcurrentassets / ds.totalcurrentliabilities,
# Misc. info
symbol=symbol,
sic_info=sic_code[symbol],
sic_industry=sic_industry[symbol],
# Graham screening
revenue=ds.operatingrevenue,
epsgrowth=ds.epsgrowth,
bvps=ds.bookvaluepershare,
# Target
y=(df_prices.loc[symbol].price.shift(
-target_shift) / ds.price) - 1,
positions=lambda r: | pd.cut(r.y, bins) | pandas.cut |
__all__ = [
'PrettyPachydermClient'
]
import logging
import re
from typing import Dict, List, Iterable, Union, Optional
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pandas.io.formats.style as style
import pandas as pd
import numpy as np
import yaml
from IPython.core.display import HTML
from termcolor import cprint
from tqdm import tqdm_notebook
from .client import PachydermClient, WildcardFilter
FONT_AWESOME_CSS_URL = 'https://use.fontawesome.com/releases/v5.8.1/css/all.css'
CLIPBOARD_JS_URL = 'https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.4/clipboard.js'
BAR_COLOR = '#105ecd33'
PROGRESS_BAR_COLOR = '#03820333'
# Make yaml.dump() keep the order of keys in dictionaries
yaml.add_representer(
dict,
    lambda self, data: yaml.representer.SafeRepresenter.represent_dict(self, data.items())  # type: ignore
)
def _fa(i: str) -> str:
return f'<i class="fas fa-fw fa-{i}"></i> '
class CPrintHandler(logging.StreamHandler):
def emit(self, record: logging.LogRecord):
color = {
logging.INFO: 'green',
logging.WARNING: 'yellow',
logging.ERROR: 'red',
logging.CRITICAL: 'red',
}.get(record.levelno, 'grey')
cprint(self.format(record), color=color)
class PrettyTable(HTML):
def __init__(self, styler: style.Styler, df: pd.DataFrame):
super().__init__(data=styler.render())
self.raw = df
self.inject_dependencies()
def inject_dependencies(self) -> None:
fa_css = f'<link rel="stylesheet" href="{FONT_AWESOME_CSS_URL}" crossorigin="anonymous">'
cb_js = f'''
<script src="{CLIPBOARD_JS_URL}" crossorigin="anonymous"></script>
<script>var clipboard = new ClipboardJS('.copyable');</script>
'''
self.data = fa_css + cb_js + self.data # type: ignore
class PrettyYAML(HTML):
def __init__(self, obj: object):
super().__init__(data=self.format_yaml(obj))
self.raw = obj
@staticmethod
def format_yaml(obj: object) -> str:
s = str(yaml.dump(obj))
s = re.sub(r'(^[\s-]*)([^\s]+:)', '\\1<span style="color: #888;">\\2</span>', s, flags=re.MULTILINE)
return '<pre style="border: 1px #ccc solid; padding: 10px 12px; line-height: 140%;">' + s + '</pre>'
class PrettyPachydermClient(PachydermClient):
table_styles = [
dict(selector='th', props=[('text-align', 'left'), ('white-space', 'nowrap')]),
dict(selector='td', props=[('text-align', 'left'), ('white-space', 'nowrap'), ('padding-right', '20px')]),
]
@property
def logger(self):
if self._logger is None:
self._logger = logging.getLogger('pachypy')
self._logger.handlers = [CPrintHandler()]
self._logger.setLevel(logging.DEBUG)
self._logger.propagate = False
return self._logger
def list_repos(self, repos: WildcardFilter = '*') -> PrettyTable:
df = super().list_repos(repos=repos)
dfr = df.copy()
df.rename({
'repo': 'Repo',
'is_tick': 'Tick',
'branches': 'Branches',
'size_bytes': 'Size',
'created': 'Created',
}, axis=1, inplace=True)
df['Tick'] = df['Tick'].map({True: _fa('stopwatch'), False: ''})
df['Branches'] = df['Branches'].apply(', '.join)
styler = df[['Repo', 'Tick', 'Branches', 'Size', 'Created']].style \
.bar(subset=['Size'], color=BAR_COLOR, vmin=0) \
.format({'Created': self._format_datetime, 'Size': self._format_size}) \
.set_properties(subset=['Branches'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
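    # Example (sketch): typical interactive use in a notebook; the constructor call is an
    # illustrative assumption inherited from PachydermClient.
    #   client = PrettyPachydermClient()
    #   client.list_repos('raw_*')   # renders a styled HTML table
    #   client.list_repos('*').raw   # underlying pandas DataFrame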
def list_commits(self, repos: WildcardFilter, n: int = 10) -> PrettyTable:
df = super().list_commits(repos=repos, n=n)
dfr = df.copy()
df.rename({
'repo': 'Repo',
'commit': 'Commit',
'branches': 'Branch',
'size_bytes': 'Size',
'started': 'Started',
'finished': 'Finished',
'parent_commit': 'Parent Commit',
}, axis=1, inplace=True)
styler = df[['Repo', 'Commit', 'Branch', 'Size', 'Started', 'Finished', 'Parent Commit']].style \
.bar(subset=['Size'], color=BAR_COLOR, vmin=0) \
.format({
'Commit': self._format_hash,
'Parent Commit': self._format_hash,
'Branch': ', '.join,
'Started': self._format_datetime,
'Finished': self._format_datetime,
'Size': self._format_size
}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_files(self, repos: WildcardFilter, branch: Optional[str] = 'master', commit: Optional[str] = None,
glob: str = '**', files_only: bool = True) -> PrettyTable:
df = super().list_files(repos=repos, branch=branch, commit=commit, glob=glob, files_only=files_only)
dfr = df.copy()
df.rename({
'repo': 'Repo',
'type': 'Type',
'path': 'Path',
'size_bytes': 'Size',
'commit': 'Commit',
'branches': 'Branch',
'committed': 'Committed',
}, axis=1, inplace=True)
styler = df[['Repo', 'Commit', 'Branch', 'Type', 'Path', 'Size', 'Committed']].style \
.bar(subset=['Size'], color=BAR_COLOR, vmin=0) \
.format({
'Type': self._format_file_type,
'Size': self._format_size,
'Commit': self._format_hash,
'Branch': ', '.join,
'Committed': self._format_datetime
}) \
.set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_pipelines(self, pipelines: WildcardFilter = '*') -> PrettyTable:
df = super().list_pipelines(pipelines=pipelines)
dfr = df.copy()
df['sort_key'] = df.index.map(self._calc_pipeline_sort_key(df['input_repos'].to_dict()))
df.sort_values('sort_key', inplace=True)
df.rename({
'pipeline': 'Pipeline',
'state': 'State',
'cron_spec': 'Cron',
'cron_prev_tick': 'Last Tick',
'cron_next_tick': 'Next Tick',
'input': 'Input',
'output_branch': 'Output',
'datum_tries': 'Tries',
'created': 'Created',
}, axis=1, inplace=True)
df.loc[df['jobs_running'] > 0, 'State'] = 'job running'
now = datetime.now(self.user_timezone)
df['Next Tick In'] = (now - df['Next Tick']).dt.total_seconds() * -1
df['Parallelism'] = ''
df.loc[df['parallelism_constant'] > 0, 'Parallelism'] = \
_fa('hashtag') + df['parallelism_constant'].astype(str)
df.loc[df['parallelism_coefficient'] > 0, 'Parallelism'] = \
_fa('asterisk') + df['parallelism_coefficient'].astype(str)
df['Jobs'] = \
'<span style="color: green">' + df['jobs_success'].astype(str) + '</span>' + \
np.where(df['jobs_failure'] > 0, ' + <span style="color: red">' + df['jobs_failure'].astype(str) + '</span>', '')
styler = df[['Pipeline', 'State', 'Cron', 'Next Tick In', 'Input', 'Output', 'Parallelism', 'Jobs', 'Created']].style \
.apply(self._style_pipeline_state, subset=['State']) \
.format({
'State': self._format_pipeline_state,
'Cron': self._format_cron_spec,
'Next Tick In': self._format_duration,
'Created': self._format_datetime,
}) \
.set_properties(subset=['Input'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_jobs(self, pipelines: WildcardFilter = '*', n: int = 20, hide_null_jobs: bool = True) -> PrettyTable:
df = super().list_jobs(pipelines=pipelines, n=n, hide_null_jobs=hide_null_jobs)
dfr = df.copy()
df.rename({
'job': 'Job',
'pipeline': 'Pipeline',
'state': 'State',
'started': 'Started',
'duration': 'Duration',
'restart': 'Restarts',
'download_bytes': 'Downloaded',
'upload_bytes': 'Uploaded',
'output_commit': 'Output Commit',
}, axis=1, inplace=True)
df['Duration'] = df['Duration'].dt.total_seconds()
df['Progress'] = \
df['progress'].fillna(0).apply(lambda x: f'{x:.0%}') + ' | ' + \
'<span style="color: green">' + df['data_processed'].astype(str) + '</span>' + \
np.where(df['data_skipped'] > 0, ' + <span style="color: purple">' + df['data_skipped'].astype(str) + '</span>', '') + \
' / <span>' + df['data_total'].astype(str) + '</span>'
styler = df[['Job', 'Pipeline', 'State', 'Started', 'Duration', 'Progress', 'Restarts', 'Downloaded', 'Uploaded', 'Output Commit']].style \
.bar(subset=['Duration'], color=BAR_COLOR, vmin=0) \
.apply(self._style_job_state, subset=['State']) \
.apply(self._style_job_progress, subset=['Progress']) \
.format({
'Job': self._format_hash,
'State': self._format_job_state,
'Started': self._format_datetime,
'Duration': self._format_duration,
'Restarts': lambda i: _fa('undo') + str(i) if i > 0 else '',
'Downloaded': self._format_size,
'Uploaded': self._format_size,
'Output Commit': self._format_hash
}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_datums(self, job: str) -> PrettyTable:
df = super().list_datums(job=job)
dfr = df.copy()
df.rename({
'job': 'Job',
'datum': 'Datum',
'state': 'State',
'repo': 'Repo',
'type': 'Type',
'path': 'Path',
'size_bytes': 'Size',
'commit': 'Commit',
'committed': 'Committed',
}, axis=1, inplace=True)
styler = df[['Job', 'Datum', 'State', 'Repo', 'Type', 'Path', 'Size', 'Commit', 'Committed']].style \
.bar(subset=['Size'], color=BAR_COLOR, vmin=0) \
.apply(self._style_datum_state, subset=['State']) \
.format({
'Job': self._format_hash,
'Datum': self._format_hash,
'State': self._format_datum_state,
'Type': self._format_file_type,
'Size': self._format_size,
'Commit': self._format_hash,
'Committed': self._format_datetime
}) \
.set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def get_logs(self, pipelines: WildcardFilter = '*', datum: Optional[str] = None,
last_job_only: bool = True, user_only: bool = False, master: bool = False, tail: int = 0) -> None:
df = super().get_logs(pipelines=pipelines, last_job_only=last_job_only, user_only=user_only, master=master, tail=tail)
job = None
worker = None
for _, row in df.iterrows():
if row.job != job:
print()
cprint(f' Pipeline {row.pipeline} ' + (f'| Job {row.job} ' if row.job else ''), 'yellow', 'on_grey')
if row.worker != worker:
cprint(f' Worker {row.worker} ', 'white', 'on_grey')
color = 'grey' if row.user else 'blue'
message = row.message
if 'warning' in message.lower():
color = 'magenta'
elif 'error' in message.lower() or 'exception' in message.lower() or 'critical' in message.lower():
color = 'red'
cprint(f'[{row.ts}] {message}', color)
job = row.job
worker = row.worker
def inspect_repo(self, repo: str) -> PrettyYAML:
info = super().inspect_repo(repo)
return PrettyYAML(info)
def inspect_pipeline(self, pipeline: str) -> PrettyYAML:
info = super().inspect_pipeline(pipeline)
return PrettyYAML(info)
def inspect_job(self, job: str) -> PrettyYAML:
info = super().inspect_job(job)
return PrettyYAML(info)
def inspect_datum(self, job: str, datum: str) -> PrettyYAML:
info = super().inspect_datum(job, datum)
return PrettyYAML(info)
@staticmethod
def _calc_pipeline_sort_key(input_repos: Dict[str, List[str]]):
def get_dag_distance(p, i=0):
yield i
for d in input_repos[p]:
if d in pipelines:
yield from get_dag_distance(d, i + 1)
def get_dag_dependencies(p):
yield p
for d in input_repos[p]:
if d in pipelines:
yield from get_dag_dependencies(d)
pipelines = set(input_repos.keys())
dag_distance = {p: max(list(get_dag_distance(p))) for p in pipelines}
dag_nodes = {p: set(get_dag_dependencies(p)) for p in pipelines}
for p, nodes in dag_nodes.items():
for node in nodes:
dag_nodes[node].update(nodes)
dag_name = {p: min(nodes) for p, nodes in dag_nodes.items()}
return {p: f'{dag_name[p]}/{dag_distance[p]}' for p in pipelines}
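# Illustrative example of the sort key above (hypothetical input, not from the original code):
# with input_repos = {'a': ['raw'], 'b': ['a'], 'c': ['b']} the DAG distances are
# {'a': 0, 'b': 1, 'c': 2} and all three pipelines share the DAG name 'a', so the
# returned keys are {'a': 'a/0', 'b': 'a/1', 'c': 'a/2'} -- pipelines are grouped by
# DAG and ordered upstream-to-downstream within each DAG.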
def _format_datetime(self, d: datetime) -> str:
if pd.isna(d):
return ''
td = (datetime.now(self.user_timezone).date() - d.date()).days
word = {-1: 'Tomorrow', 0: 'Today', 1: 'Yesterday'}
return (word[td] if td in word else f'{d:%-d %b %Y}') + f' at {d:%H:%M}'
@staticmethod
def _format_duration(secs: float, n: int = 2) -> str:
if pd.isna(secs):
return ''
d = relativedelta(seconds=int(secs), microseconds=int((secs % 1) * 1e6))
attrs = {
'years': 'years',
'months': 'months',
'days': 'days',
'hours': 'hours',
'minutes': 'mins',
'seconds': 'secs',
'microseconds': 'ms'
}
ret = ''
i = 0
for attr, attr_short in attrs.items():
x = getattr(d, attr, 0)
if x > 0:
if attr == 'microseconds':
x /= 1000
u = attr_short
else:
u = attr_short if x != 1 else attr_short[:-1]
ret += f'{x:.0f} {u}, '
i += 1
if i >= n or attr in {'minutes', 'seconds'}:
break
return ret.strip(', ')
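# Example behaviour (a sketch, not part of the original source): _format_duration(3725.0)
# normalises to relativedelta(hours=1, minutes=2, seconds=5) and with the default n=2
# returns '1 hour, 2 mins'; _format_duration(0.25) returns '250 ms'.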
@staticmethod
def _format_size(x: Union[int, float]) -> str:
if abs(x) == 1:
return f'{x:.0f} byte'
if abs(x) < 1000.0:
return f'{x:.0f} bytes'
x /= 1000.0
for unit in ['KB', 'MB', 'GB', 'TB']:
if abs(x) < 1000.0:
return f'{x:.1f} {unit}'
x /= 1000.0
return f'{x:,.1f} PB'
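# Example behaviour (sketch): _format_size(1) -> '1 byte', _format_size(999) -> '999 bytes',
# _format_size(1234567) -> '1.2 MB' (decimal units, repeatedly divided by 1000).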
@staticmethod
def _format_hash(s: str) -> str:
if pd.isna(s):
return ''
short = s[:5] + '..' + s[-5:] if len(s) > 12 else s
return f'<pre class="copyable" title="{s} (click to copy)" data-clipboard-text="{s}" style="cursor: copy; background: none; white-space: nowrap;">{short}</pre>'
@staticmethod
def _format_cron_spec(s: str) -> str:
if pd.isna(s) or s == '':
return ''
return _fa('stopwatch') + s
@staticmethod
def _format_file_type(s: str) -> str:
return {
'file': _fa('file') + s,
'dir': _fa('folder') + s,
}.get(s, s)
@staticmethod
def _format_pipeline_state(s: str) -> str:
return {
'starting': _fa('spinner') + s,
'restarting': _fa('undo') + s,
'running': _fa('toggle-on') + s,
'job running': _fa('running') + s,
'failure': _fa('bolt') + s,
'paused': _fa('toggle-off') + s,
'standby': _fa('power-off') + s,
}.get(s, s)
@staticmethod
def _format_job_state(s: str) -> str:
return {
'unknown': _fa('question') + s,
'starting': _fa('spinner') + s,
'running': _fa('running') + s,
'merging': _fa('compress-arrows-alt') + s,
'success': _fa('check') + s,
'failure': _fa('bolt') + s,
'killed': _fa('skull-crossbones') + s,
}.get(s, s)
@staticmethod
def _format_datum_state(s: str) -> str:
return {
'unknown': _fa('question') + s,
'starting': _fa('spinner') + s,
'skipped': _fa('forward') + s,
'success': _fa('check') + s,
'failed': _fa('bolt') + s,
}.get(s, s)
@staticmethod
def _style_pipeline_state(s: Iterable[str]) -> List[str]:
color = {
'starting': 'orange',
'restarting': 'orange',
'running': 'green',
'job running': 'purple',
'failure': 'red',
'paused': 'orange',
'standby': '#0251c9',
}
return [f"color: {color.get(v, 'gray')}; font-weight: bold" for v in s]
@staticmethod
def _style_job_state(s: Iterable[str]) -> List[str]:
color = {
'starting': 'orange',
'running': 'orange',
'merging': 'orange',
'success': 'green',
'failure': 'red',
'killed': 'red',
}
return [f"color: {color.get(v, 'gray')}; font-weight: bold" for v in s]
@staticmethod
def _style_datum_state(s: Iterable[str]) -> List[str]:
color = {
'starting': 'orange',
'skipped': '#0251c9',
'success': 'green',
'failed': 'red',
}
return [f"color: {color.get(v, 'gray')}; font-weight: bold" for v in s]
@staticmethod
def _style_job_progress(s: pd.Series) -> List[str]:
def css_bar(end):
css = 'width: 10em; height: 80%;'
if end > 0:
css += 'background: linear-gradient(90deg,'
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(e=min(end, 100), c=PROGRESS_BAR_COLOR)
return css
s = s.apply(lambda x: float(x.split('%')[0]))
return [css_bar(x) if not pd.isna(x) else '' for x in s]
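# Usage sketch for the Pretty* wrappers (illustrative only; connection details are
# hypothetical and depend on how the underlying PachydermClient is configured):
#
#     client = PrettyPachydermClient()
#     client.list_repos('*')                   # styled HTML table of repos
#     client.list_pipelines('my-*')            # pipelines sorted by DAG and distance
#     client.inspect_pipeline('my-pipeline')   # rendered as PrettyYAML
#
# Each list_* method returns a PrettyTable (defined earlier in this module) that wraps
# both the styled pandas Styler and the raw DataFrame, so the raw data stays available.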
import numpy as np
import pandas as pd
from typing import List
from astropy.time import Time
from .orbits import Orbits
from .utils import assignPatchesSquare
__all__ = [
"findAverageOrbits",
"findTestOrbitsPatch",
"selectTestOrbits",
]
def findAverageOrbits(
ephemeris: pd.DataFrame,
orbits: pd.DataFrame,
d_values: list = None,
element_type: str = "keplerian",
) -> pd.DataFrame:
"""
Find the object whose observations are the most average in terms of
Cartesian velocity and heliocentric distance.
Assumes that a subset of the designations in the orbits
dataframe are identical to at least some of the designations in the observations
dataframe. No propagation is done, so the orbits need to be defined at an epoch near
the time of observations, for example like the midpoint or start of a two-week window.
Parameters
----------
ephemeris : `~pandas.DataFrame`
DataFrame containing simulated ephemerides.
orbits : `~pandas.DataFrame`
DataFrame containing orbits for each unique object in observations.
d_values : {list (N>=2), None}, optional
If None, the average orbit is found over all observations. If a list, an average orbit
is found within each bin defined by consecutive values (see the usage sketch after this function).
For example, passing d_values = [1.0, 2.0, 4.0] finds an average orbit in each of the bins (1.0 <= d < 2.0) and (2.0 <= d < 4.0).
element_type : {'keplerian', 'cartesian'}, optional
Find average orbits using which elements. If 'keplerian' will use a-e-i for average,
if 'cartesian' will use r, v.
[Default = 'keplerian']
Returns
-------
average_orbits : `~pandas.DataFrame`
Orbits selected from the input orbits that are the most average within each bin.
"""
if element_type == "keplerian":
d_col = "a"
elif element_type == "cartesian":
d_col = "r_au"
else:
err = (
"element_type should be one of {'keplerian', 'cartesian'}"
)
raise ValueError(err)
dataframe = pd.merge(orbits, ephemeris, on="orbit_id", how="right").copy()
d_bins = []
if d_values is not None:
for d_i, d_f in zip(d_values[:-1], d_values[1:]):
d_bins.append(dataframe[(dataframe[d_col] >= d_i) & (dataframe[d_col] < d_f)])
else:
d_bins.append(dataframe)
average_orbits = []
for i, obs in enumerate(d_bins):
if len(obs) == 0:
continue
if element_type == "cartesian":
rv = obs[["vx", "vy", "vz", d_col]].values
median = np.median(rv, axis=0)
percent_diff = np.abs((rv - median) / median)
else:
aie = obs[["a", "i", "e"]].values
median = np.median(aie, axis=0)
percent_diff = np.abs((aie - median) / median)
# Sum the percent differences
summed_diff = np.sum(percent_diff, axis=1)
# Find the minimum summed percent difference and call that
# the average object
index = np.where(summed_diff == np.min(summed_diff))[0][0]
orbit_id = obs["orbit_id"].values[index]
average_orbits.append(orbit_id)
average_orbits = orbits[orbits["orbit_id"].isin(average_orbits)]
average_orbits.reset_index(
inplace=True,
drop=True
)
return average_orbits
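# Usage sketch (hypothetical dataframes; column names follow the function above):
#
#     # 'ephemeris' and 'orbits' both carry an 'orbit_id' column; with
#     # element_type='keplerian' the bins below are defined on semi-major axis 'a'.
#     avg = findAverageOrbits(
#         ephemeris,
#         orbits,
#         d_values=[2.0, 2.5, 3.0],   # two bins: 2.0 <= a < 2.5 and 2.5 <= a < 3.0
#         element_type="keplerian",
#     )
#     # 'avg' holds one representative (most average a-i-e) orbit per non-empty bin.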
def findTestOrbitsPatch(ephemeris: pd.DataFrame) -> pd.DataFrame:
"""
Find test orbits for a patch of ephemerides.
Parameters
----------
ephemeris : `~pandas.DataFrame`
DataFrame containing predicted ephemerides (including aberrated cartesian state
vectors) of an input catalog of orbits for a patch or small region of the
sky.
Returns
-------
test_orbits : `~pandas.DataFrame` (<=9)
Up to 9 test orbits for the given patch of ephemerides.
"""
observation_times = Time(
ephemeris["mjd_utc"].values,
scale="utc",
format="mjd"
)
orbits = Orbits(
ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values,
observation_times,
ids=ephemeris["orbit_id"],
orbit_type="cartesian"
)
orbits_df = orbits.to_df(
include_units=False,
include_keplerian=True
)
patch_id = ephemeris["patch_id"].unique()[0]
test_orbits_hun1_patch = findAverageOrbits(
ephemeris,
orbits_df[(orbits_df["a"] < 2.06) & (orbits_df["a"] >= 1.7) & (orbits_df["e"] <= 0.1)],
element_type="keplerian",
d_values=[1.7, 2.06]
)
test_orbits_hun2_patch = findAverageOrbits(
ephemeris,
orbits_df[(orbits_df["a"] < 2.06) & (orbits_df["a"] >= 1.7) & (orbits_df["e"] > 0.1) & (orbits_df["e"] <= 0.2)],
element_type="keplerian",
d_values=[1.7, 2.06]
)
test_orbits_hun3_patch = findAverageOrbits(
ephemeris,
orbits_df[(orbits_df["a"] < 2.06) & (orbits_df["a"] >= 1.7) & (orbits_df["e"] > 0.2) & (orbits_df["e"] <= 0.4)],
element_type="keplerian",
d_values=[1.7, 2.06]
)
test_orbits_patch = findAverageOrbits(
ephemeris,
orbits_df[(orbits_df["e"] < 0.5)],
element_type="keplerian",
d_values=[2.06, 2.5, 2.82, 2.95, 3.27, 5.0, 50.0],
)
test_orbits_patch = pd.concat(
[
test_orbits_hun1_patch,
test_orbits_hun2_patch,
test_orbits_hun3_patch,
test_orbits_patch
],
ignore_index=True
)
test_orbits_patch.insert(0, "patch_id", patch_id)
test_orbits_patch["r"] = np.linalg.norm(test_orbits_patch[["x", "y", "z"]].values, axis=1)
test_orbits_patch.sort_values(
by=["r"],
inplace=True
)
return test_orbits_patch
def findTestOrbits_worker(ephemeris_list: List[pd.DataFrame]) -> pd.DataFrame:
"""
Find test orbits for a given list of patches of ephemerides.
Parameters
----------
ephemeris_list : list[`~pandas.DataFrame`]
Small patches of ephemerides for which to find test orbits.
Returns
-------
test_orbits : `~pandas.DataFrame`
Test orbits for the given ephemerides.
"""
test_orbits_list = []
for ephemeris in ephemeris_list:
test_orbits_patch = findTestOrbitsPatch(ephemeris)
test_orbits_list.append(test_orbits_patch)
if len(test_orbits_list) > 0:
test_orbits = pd.concat(test_orbits_list, ignore_index=True)
else:
test_orbits = pd.DataFrame()
return test_orbits
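# Usage sketch (assumes the ephemerides have already been split into patches, e.g. with
# assignPatchesSquare, and that each patch dataframe carries a 'patch_id' column):
#
#     patches = [df for _, df in ephemeris.groupby("patch_id")]
#     test_orbits = findTestOrbits_worker(patches)
#
# The worker simply maps findTestOrbitsPatch over the patches and concatenates the
# results, so it can be handed one chunk of patches at a time by a multiprocessing pool.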
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: 金十数据中心-经济指标-美国
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# EastMoney - United States - Pending Home Sales MoM
def macro_usa_phs():
"""
US pending home sales month-over-month rate
http://data.eastmoney.com/cjsj/foreign_0_5.html
:return: pending home sales MoM data
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
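# Usage sketch (performs a live HTTP request against the EastMoney endpoint above):
#
#     phs_df = macro_usa_phs()
#     print(phs_df.head())
#
# The returned DataFrame keeps the original Chinese column names
# ('时间', '前值', '现值', '发布日期'), i.e. date, previous value, current value and release date.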
# Jin10 Data Center - Economic Indicators - US - Economic Situation - US GDP
def macro_usa_gdp_monthly():
"""
US Gross Domestic Product (GDP) report; data available from 2008-02-28 to the present
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
import json
import nltk
import numpy as np
import pandas as pd
import bokeh as bk
from math import pi
from collections import Counter
from bokeh.transform import cumsum
from bokeh.palettes import Category20c
from bokeh.models.glyphs import VBar
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.io import curdoc, show
from bokeh.core.properties import value
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import file_html
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import subjectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import *
from pyramid_restful.viewsets import APIViewSet
from pyramid.response import Response
from pyramid.view import view_config
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def stacked_bar_for_one(data):
""" Chart display for one analysis/one user.
"""
if data == {}:
return 'There is no data for this user'
analysis_df = pd.DataFrame()
user_id = data.keys()
sentence_counter = 0
key_list = []
for key in user_id:
for one_record in data[key]:
record_obj = json.loads(one_record)
for sentence in record_obj['Sentences']:
# key_list.append(sentence)
ss = record_obj['Sentences'][sentence]
ss['sentence'] = sentence
columns = ['neg', 'neu', 'pos', 'compound', 'sentence']
sentence_counter += 1
key_list.append(str(sentence_counter))
index = [sentence_counter]
temp = pd.DataFrame(ss, columns=columns, index=index)
analysis_df = pd.concat([analysis_df, temp], sort=True)
output_file("stacked.html")
emotions = ['Negative', 'Neutral', 'Positive']
data = {'Sentences': analysis_df.index,
'Negative': analysis_df.neg,
'Neutral': analysis_df.neu,
'Positive': analysis_df.pos}
colors = ["#e84d60", "#c9d9d3", "#718dbf"]
p = figure(y_range=(0, 1.2), plot_height=500, title="Sentiment Analysis",
toolbar_location=None, tools="")
p.vbar_stack(emotions, x='Sentences', width=0.9, color=colors, source=data,
legend=[value(x) for x in emotions])
p.y_range.start = 0
p.x_range.range_padding = 0.2
p.xaxis.axis_label = 'Sentences'
p.yaxis.axis_label = 'Percentage (%)'
p.xgrid.grid_line_color = None
p.axis.minor_tick_line_color = None
p.outline_line_color = None
p.legend.location = "top_left"
p.legend.orientation = "horizontal"
html = file_html(p, CDN, "Single User Stacked Bar")
return html
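# Usage sketch (hypothetical input matching the structure the function expects --
# a dict keyed by user id whose values are lists of JSON strings with a
# 'Sentences' mapping of sentence -> VADER scores):
#
#     sample = {
#         'user-1': [
#             '{"Sentences": {"I love this.": '
#             '{"neg": 0.0, "neu": 0.2, "pos": 0.8, "compound": 0.6}}}'
#         ]
#     }
#     html = stacked_bar_for_one(sample)   # returns a standalone Bokeh HTML string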
def stacked_bar_for_all(data):
""" Chart display for get analysis for all users combined.
This is for the admin to view a collection of user's analysis """
if data == {}:
return 'There is no data in the database'
analysis_df = pd.DataFrame()
user_id = data.keys()
sentence_counter = 0
key_list = []
for key in user_id:
for one_record in data[key]:
record_obj = json.loads(one_record)
for sentence in record_obj['Sentences']:
# key_list.append(sentence)
ss = record_obj['Sentences'][sentence]
ss['sentence'] = sentence
columns = ['neg', 'neu', 'pos', 'compound', 'sentence']
sentence_counter += 1
key_list.append(str(sentence_counter))
index = [sentence_counter]
temp = pd.DataFrame(ss, columns=columns, index=index)
analysis_df = pd.concat([analysis_df, temp], sort=True)
import pandas as pd
# import copy
from pathlib import Path
import pickle
pd.set_option('display.max_colwidth', -1)
pd.options.display.max_rows = 999
pd.options.mode.chained_assignment = None
import numpy as np
import math
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn import preprocessing
from scipy.stats import boxcox
import statsmodels.api as sm
# https://www.statsmodels.org/stable/api.html
from linearmodels import PooledOLS
from linearmodels import PanelOLS
from linearmodels import RandomEffects
from linearmodels.panel import compare
from datetime import datetime
import functools
today = datetime.today()
yearmonth = today.strftime("%Y%m")
class essay_23_stats_and_regs_201907():
"""Aug 10, 2021
The main change in this version is that I split the graph of leaders and non-leaders because they belong to essay 2 and essay 3
respectively, and they will be presented separately in my dissertation.
"""
initial_panel = '201907'
all_panels = ['201907',
'201908',
'201909',
'201912',
'202001',
'202003',
'202004',
'202009',
'202010',
'202011',
'202012',
'202101',
'202102',
'202103',
'202104',
'202105',
'202106']
panel_root = Path(
'/home/naixin/Insync/na<EMAIL>.cn/OneDrive/_____GWU_ECON_PHD_____/___Dissertation___/____WEB_SCRAPER____/__PANELS__')
des_stats_root = Path(
'/home/naixin/Insync/[email protected]/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY')
des_stats_both_tables = Path(
'/home/naixin/Insync/[email protected]/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_2_3_common___/descriptive_stats/tables')
des_stats_leaders_tables = Path(
'/home/naixin/Insync/[email protected]/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_3___/descriptive_stats/tables')
des_stats_non_leaders_tables = Path(
'/home/naixin/Insync/[email protected]/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_2___/descriptive_stats/tables')
common_path = Path(
'/home/naixin/Insync/[email protected]/OneDrive/_____GWU_ECON_PHD_____/___Dissertation___/____WEB_SCRAPER____/__PANELS__/___essay_2_3_common___')
name1_path_keywords = {'Non-leaders': '___essay_2___',
'Leaders': '___essay_3___'}
graph_name1_titles = {
'Leaders': 'Market Leaders and 5 Main Functional App Categories',
'Non-leaders': 'Market Followers and 5 Main Functional App Categories'
}
name12_graph_title_dict = {'Leaders_full': 'Market Leaders Full Sample',
'Leaders_category_GAME': 'Market Leaders Gaming Apps',
'Leaders_category_BUSINESS': 'Market Leaders Business Apps',
'Leaders_category_SOCIAL': 'Market Leaders Social Apps',
'Leaders_category_LIFESTYLE': 'Market Leaders Lifestyle Apps',
'Leaders_category_MEDICAL': 'Market Leaders Medical Apps',
'Non-leaders_full': 'Market Followers Full Sample',
'Non-leaders_category_GAME': 'Market Followers Gaming Apps',
'Non-leaders_category_BUSINESS': 'Market Followers Business Apps',
'Non-leaders_category_SOCIAL': 'Market Followers Social Apps',
'Non-leaders_category_LIFESTYLE': 'Market Followers Lifestyle Apps',
'Non-leaders_category_MEDICAL': 'Market Followers Medical Apps'}
name12_reg_table_names = {'Leaders_full': 'Leaders \nFull',
'Leaders_category_GAME': 'Leaders \nGaming Apps',
'Leaders_category_BUSINESS': 'Leaders \nBusiness Apps',
'Leaders_category_SOCIAL': 'Leaders \nSocial Apps',
'Leaders_category_LIFESTYLE': 'Leaders \nLifestyle Apps',
'Leaders_category_MEDICAL': 'Leaders \nMedical Apps',
'Non-leaders_full': 'Followers \nFull',
'Non-leaders_category_GAME': 'Followers \nGaming Apps',
'Non-leaders_category_BUSINESS': 'Followers \nBusiness Apps',
'Non-leaders_category_SOCIAL': 'Followers \nSocial Apps',
'Non-leaders_category_LIFESTYLE': 'Followers \nLifestyle Apps',
'Non-leaders_category_MEDICAL': 'Followers \nMedical Apps'}
graph_dep_vars_ylabels = {
'Imputedprice': 'Price',
'LogImputedprice': 'Log Price',
'LogWNImputedprice': 'Log Price Adjusted \nWith White Noise',
'ImputedminInstalls': 'Minimum Installs',
'LogImputedminInstalls': 'Log Minimum Installs',
'both_IAP_and_ADS': 'Percentage Points',
'TRUE_offersIAPTrue': 'Percentage of Apps Offers IAP',
'TRUE_containsAdsTrue': 'Percentage of Apps Contains Ads',
'offersIAPTrue': 'Percentage of Apps Offers IAP',
'containsAdsTrue': 'Percentage of Apps Contains Ads'
}
graph_dep_vars_titles = {
'Imputedprice': 'Price',
'LogImputedprice': 'Log Price',
'LogWNImputedprice': 'Log Price Adjusted With White Noise',
'ImputedminInstalls': 'Minimum Installs',
'LogImputedminInstalls': 'Log Minimum Installs',
'both_IAP_and_ADS': 'Percentage of Apps that Offers IAP and Contains Ads',
'TRUE_offersIAPTrue': 'Percentage of Apps Offers IAP',
'TRUE_containsAdsTrue': 'Percentage of Apps Contains Ads',
'offersIAPTrue': 'Percentage of Apps Offers IAP',
'containsAdsTrue': 'Percentage of Apps Contains Ads'
}
dep_vars_reg_table_names = {
'Imputedprice' : 'Price',
'LogImputedprice': 'Log Price',
'LogWNImputedprice': 'Log Price Adjusted \nWith White Noise',
'ImputedminInstalls': 'Minimum Installs',
'LogImputedminInstalls': 'Log Minimum Installs',
'containsAdsTrue': 'Contains Ads',
'offersIAPTrue': 'Offers IAP'
}
text_cluster_size_bins = [0, 1, 2, 3, 5, 10, 20, 30, 50, 100, 200, 500, 1500]
text_cluster_size_labels = ['[0, 1]', '(1, 2]', '(2, 3]', '(3, 5]',
'(5, 10]', '(10, 20]', '(20, 30]', '(30, 50]',
'(50, 100]', '(100, 200]', '(200, 500]', '(500, 1500]']
combined_text_cluster_size_bins = [0, 10, 30, 100, 500, 1500]
combined_text_cluster_size_labels = ['[0, 10]', '(10, 30]', '(30, 100]', '(100, 500]', '(500, 1500]']
group_by_var_x_label = {'NicheDummy' : 'Niche vs. Broad',
'cluster_size_bin': 'Size of K-Means Text Clusters'}
all_y_reg_vars = ['LogWNImputedprice',
'LogImputedminInstalls',
'offersIAPTrue',
'containsAdsTrue']
@property
def ssnames(self):
d = self._open_predicted_labels_dict()
res = dict.fromkeys(d.keys())
for name1, content1 in d.items():
res[name1] = list(content1.keys())
return res
@property
def graph_name1_ssnames(self):
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
l = []
for name2 in content1:
l.append(name1 + '_' + name2)
res[name1] = l
return res
@classmethod
def _select_vars(cls, df,
time_variant_vars_list=None,
time_invariant_vars_list=None):
df2 = df.copy(deep=True)
tv_var_list = []
if time_variant_vars_list is not None:
for i in time_variant_vars_list:
vs = [i + '_' + j for j in cls.all_panels]
tv_var_list = tv_var_list + vs
ti_var_list = []
if time_invariant_vars_list is not None:
for i in time_invariant_vars_list:
ti_var_list.append(i)
total_vars = tv_var_list + ti_var_list
df2 = df2[total_vars]
return df2
@classmethod
def _open_imputed_deleted_divided_df(cls):
f_name = cls.initial_panel + '_imputed_deleted_subsamples.pickle'
q = cls.common_path / f_name
with open(q, 'rb') as f:
df = pickle.load(f)
return df
@classmethod
def _open_predicted_labels_dict(cls):
f_name = cls.initial_panel + '_predicted_labels_dict.pickle'
q = cls.common_path / 'predicted_text_labels' / f_name
with open(q, 'rb') as f:
d = pickle.load(f)
return d
@classmethod
def _open_app_level_text_cluster_stats(cls):
filename = cls.initial_panel + '_dict_app_level_text_cluster_stats.pickle'
q = cls.common_path / 'app_level_text_cluster_stats' / filename
with open(q, 'rb') as f:
d = pickle.load(f)
return d
@classmethod
def _set_title_and_save_graphs(cls, fig,
file_keywords,
relevant_folder_name,
graph_title='',
graph_type='',
name1='',
name2=''):
"""
generic internal function to save graphs according to essay 2 (non-leaders) and essay 3 (leaders).
name1 and name2 are the key names of essay_23_stats_and_regs_201907.ssnames
name1 is either 'Leaders' or 'Non-leaders', and name2 is 'full' or one of the category names.
graph_title describes what the graph shows.
"""
# ------------ set title -------------------------------------------------------------------------
if graph_title != '':
if name1 != '' and name2 != '':
title = cls.initial_panel + ' ' + cls.name12_graph_title_dict[
name1 + '_' + name2] + ' \n' + graph_title
else:
title = cls.initial_panel + ' ' + graph_title
title = title.title()
fig.suptitle(title, fontsize='medium')
# ------------ save ------------------------------------------------------------------------------
filename = cls.initial_panel + '_' + name1 + '_' + name2 + '_' + file_keywords + '_' + graph_type + '.png'
fig.savefig(cls.des_stats_root / cls.name1_path_keywords[name1] / 'descriptive_stats' / 'graphs' / relevant_folder_name / filename,
facecolor='white',
dpi=300)
def __init__(self,
tcn,
combined_df=None,
broad_niche_cutoff=None,
broadDummy_labels=None,
reg_results=None):
self.tcn = tcn
self.cdf = combined_df
self.broad_niche_cutoff = broad_niche_cutoff
self.broadDummy_labels = broadDummy_labels
self.reg_results = reg_results
def open_cross_section_reg_df(self):
filename = self.initial_panel + '_cross_section_df.pickle'
q = self.common_path / 'cross_section_dfs' / filename
with open(q, 'rb') as f:
self.cdf = pickle.load(f)
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _numApps_per_cluster(self):
d2 = self._open_predicted_labels_dict()
d = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
d[name1] = dict.fromkeys(content1)
for name2 in d[name1].keys():
label_col_name = name1 + '_' + name2 + '_kmeans_labels'
s2 = d2[name1][name2].groupby(
[label_col_name]).size(
).sort_values(
ascending=False)
d[name1][name2] = s2.rename('Apps Count').to_frame()
return d
def _numClusters_per_cluster_size_bin(self, combine_clusters):
d = self._numApps_per_cluster()
res = dict.fromkeys(d.keys())
for k1, content1 in d.items():
res[k1] = dict.fromkeys(content1.keys())
for k2, df in content1.items():
df2 = df.copy(deep=True)
# since the minimum number of apps in a cluster is 1, not 0, the smallest range (0, 1] is OK.
# there is an option include_lowest=True; however, it will return float bins while I want integer bins, so I will leave it
# cannot set retbins=True because it will override the labels
if combine_clusters is True:
df3 = df2.groupby(pd.cut(x=df2.iloc[:, 0],
bins=self.combined_text_cluster_size_bins,
include_lowest=True,
labels=self.combined_text_cluster_size_labels)
).count()
else:
df3 = df2.groupby(pd.cut(x=df2.iloc[:, 0],
bins=self.text_cluster_size_bins,
include_lowest=True,
labels=self.text_cluster_size_labels)
).count()
df3.rename(columns={'Apps Count': 'Clusters Count'}, inplace=True)
res[k1][k2] = df3
return res
def _numApps_per_cluster_size_bin(self, combine_clusters):
d1 = self._numApps_per_cluster()
d3 = self._open_predicted_labels_dict()
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
res[name1] = dict.fromkeys(content1)
for name2 in content1:
df = d3[name1][name2].copy(deep=True)
# create a new column indicating the number of apps in the particular cluster for that app
predicted_label_col = name1 + '_' + name2 + '_kmeans_labels'
df['numApps_in_cluster'] = df[predicted_label_col].apply(
lambda x: d1[name1][name2].loc[x])
# create a new column indicating the size bin the text cluster belongs to
if combine_clusters is True:
df['cluster_size_bin'] = pd.cut(
x=df['numApps_in_cluster'],
bins=self.combined_text_cluster_size_bins,
include_lowest=True,
labels=self.combined_text_cluster_size_labels)
else:
df['cluster_size_bin'] = pd.cut(
x=df['numApps_in_cluster'],
bins=self.text_cluster_size_bins,
include_lowest=True,
labels=self.text_cluster_size_labels)
# create a new column indicating grouped sum of numApps_in_cluster for each cluster_size
df2 = df.groupby('cluster_size_bin').count()
df3 = df2.iloc[:, 0].to_frame()
df3.columns = ['numApps_in_cluster_size_bin']
res[name1][name2] = df3
return res
def determine_niche_broad_cutoff(self):
d = self._numApps_per_cluster()
self.broad_niche_cutoff = dict.fromkeys(self.ssnames.keys())
self.broadDummy_labels = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
self.broad_niche_cutoff[name1] = dict.fromkeys(content1)
self.broadDummy_labels[name1] = dict.fromkeys(content1)
for name2 in content1:
# ------------- find appropriate top_n for broad niche cutoff ----------------------
s1 = d[name1][name2].to_numpy()
s_multiples = np.array([])
for i in range(len(s1) - 1):
multiple = s1[i] / s1[i + 1]
s_multiples = np.append(s_multiples, multiple)
# top_n equals to the first n numbers that are 2
top_n = 0
if len(s_multiples) > 2:
for i in range(len(s_multiples) - 2):
if s_multiples[i] >= 2 and top_n == i:
top_n += 1
elif s_multiples[i + 1] >= 1.5 and top_n == 0:
top_n += 2
elif s_multiples[i + 2] >= 1.5 and top_n == 0:
top_n += 3
elif s_multiples[0] <= 1.1 and top_n == 0:
top_n += 2
else:
if top_n == 0:
top_n = 1
else:
top_n = 1
self.broad_niche_cutoff[name1][name2] = top_n
self.broadDummy_labels[name1][name2] = d[name1][name2][:top_n].index.tolist()
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
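# Worked example of the cutoff heuristic above (illustrative numbers only): if the
# sorted app counts per text cluster are [400, 150, 100, 90, ...], the ratios between
# consecutive clusters are [2.67, 1.5, 1.11, ...]; the first ratio is >= 2 while
# top_n == 0, so top_n becomes 1 and only the largest cluster is labelled broad --
# every other cluster counts as niche when the NicheDummy is built.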
def text_cluster_stats_at_app_level(self, combine_clusters):
d1 = self._open_predicted_labels_dict()
d2 = self._numApps_per_cluster()
d3 = self._numClusters_per_cluster_size_bin(combine_clusters)
d4 = self._numApps_per_cluster_size_bin(combine_clusters)
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
res[name1] = dict.fromkeys(content1)
for name2 in content1:
df = d1[name1][name2].copy(deep=True)
# set column names with name1 and name2 for future joining
predicted_label = name1 + '_' + name2 + '_kmeans_labels'
numApps_in_cluster = name1 + '_' + name2 + '_numApps_in_cluster'
cluster_size_bin = name1 + '_' + name2 + '_cluster_size_bin'
numClusters_in_cluster_size_bin = name1 + '_' + name2 + '_numClusters_in_cluster_size_bin'
numApps_in_cluster_size_bin = name1 + '_' + name2 + '_numApps_in_cluster_size_bin'
# create a new column indicating the number of apps in the particular cluster for that app
# (do not forget to use .squeeze() here because .loc will return a pandas series)
df[numApps_in_cluster] = df[predicted_label].apply(
lambda x: d2[name1][name2].loc[x].squeeze())
# create a new column indicating the size bin the text cluster belongs to
if combine_clusters is True:
df[cluster_size_bin] = pd.cut(
x=df[numApps_in_cluster],
bins=self.combined_text_cluster_size_bins,
include_lowest=True,
labels=self.combined_text_cluster_size_labels)
else:
df[cluster_size_bin] = pd.cut(
x=df[numApps_in_cluster],
bins=self.text_cluster_size_bins,
include_lowest=True,
labels=self.text_cluster_size_labels)
# create a new column indicating number of cluster for each cluster size bin
df[numClusters_in_cluster_size_bin] = df[cluster_size_bin].apply(
lambda x: d3[name1][name2].loc[x].squeeze())
# create a new column indicating grouped sum of numApps_in_cluster for each cluster_size
df[numApps_in_cluster_size_bin] = df[cluster_size_bin].apply(
lambda x: d4[name1][name2].loc[x].squeeze())
res[name1][name2] = df
filename = self.initial_panel + '_dict_app_level_text_cluster_stats.pickle'
q = self.common_path / 'app_level_text_cluster_stats' / filename
pickle.dump(res, open(q, 'wb'))
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def combine_app_level_text_cluster_stats_with_df(self):
df = self._open_imputed_deleted_divided_df()
d = self._open_app_level_text_cluster_stats()
x1 = d['Leaders']['full'].copy(deep=True)
x2 = d['Non-leaders']['full'].copy(deep=True)
x3 = x1.join(x2, how='outer')
list_of_dfs = [x3]
for name1, content1 in d.items():
for name2, stats_df in content1.items():
if name2 != 'full':
list_of_dfs.append(stats_df)
combined_stats_df = functools.reduce(lambda a, b: a.join(b, how='left'), list_of_dfs)
self.cdf = df.join(combined_stats_df, how='inner')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def check_text_label_contents(self):
df2 = self.cdf.copy(deep=True)
d = self._open_predicted_labels_dict()
for name1, content in d.items():
for name2, text_label_col in content.items():
label_col_name = name1 + '_' + name2 + '_kmeans_labels'
unique_labels = df2[label_col_name].unique().tolist()
unique_labels = [x for x in unique_labels if math.isnan(x) is False]
print(name1, name2, ' -- unique text labels are --')
print(unique_labels)
print()
for label_num in unique_labels:
df3 = df2.loc[df2[label_col_name]==label_num, [self.tcn + 'ModeClean']]
if len(df3.index) >= 10:
df3 = df3.sample(n=10)
f_name = self.initial_panel + '_' + name1 + '_' + name2 + '_' + 'TL_' + str(label_num) + '_' + self.tcn + '_sample.csv'
q = self.common_path / 'check_predicted_label_text_cols' / f_name
df3.to_csv(q)
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _text_cluster_group_count(self):
df2 = self.cdf.copy(deep=True)
d = dict.fromkeys(self.ssnames.keys())
self.broad_niche_cutoff = dict.fromkeys(self.ssnames.keys())
self.nicheDummy_labels = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
d[name1] = dict.fromkeys(content1)
self.broad_niche_cutoff[name1] = dict.fromkeys(content1)
self.nicheDummy_labels[name1] = dict.fromkeys(content1)
for name2 in d[name1].keys():
label_col_name = name1 + '_' + name2 + '_kmeans_labels'
# ------------- find appropriate top_n for broad niche cutoff ----------------------
s1 = df2.groupby([label_col_name]).size().sort_values(ascending=False).to_numpy()
s_multiples = np.array([])
for i in range(len(s1)-1):
multiple = s1[i]/s1[i+1]
s_multiples = np.append(s_multiples, multiple)
# top_n equals to the first n numbers that are 2
top_n = 0
for i in range(len(s_multiples)-2):
if s_multiples[i] >= 2 and top_n == i:
top_n += 1
elif s_multiples[i+1] >= 1.5 and top_n == 0:
top_n += 2
elif s_multiples[i+2] >= 1.5 and top_n == 0:
top_n += 3
elif s_multiples[0] <= 1.1 and top_n == 0:
top_n += 2
else:
if top_n == 0:
top_n = 1
self.broad_niche_cutoff[name1][name2] = top_n
s2 = df2.groupby([label_col_name]).size().sort_values(ascending=False)
s3 = s2.iloc[:self.broad_niche_cutoff[name1][name2], ]
self.nicheDummy_labels[name1][name2] = s3.index.tolist()
# ------------- convert to frame ---------------------------------------------------
d[name1][name2] = df2.groupby([label_col_name]).size(
).sort_values(ascending=False).rename(name1 + '_' + name2 + '_Apps_Count').to_frame()
return d
def _get_xy_var_list(self, name1, name2, y_var, the_panel=None):
"""
:param name1: leaders non-leaders
:param name2: all categories
:param y_var: 'Imputedprice','ImputedminInstalls','offersIAPTrue','containsAdsTrue'
:param the_panel: a single panel string such as '201907'; if None, variables for every panel are returned
:return:
"""
time_invar_controls = ['size', 'DaysSinceReleased']
x_var = [name1 + '_' + name2 + '_NicheDummy']
if the_panel is None:
time_var_controls = ['Imputedscore_' + i for i in self.all_panels] + \
['Imputedreviews_' + i for i in self.all_panels]
y_var = [y_var + '_' + i for i in self.all_panels]
else:
time_var_controls = ['Imputedscore_' + the_panel, 'Imputedreviews_' + the_panel]
y_var = [y_var + '_' + the_panel]
all_vars = y_var + x_var + time_invar_controls + time_var_controls
return all_vars
def _slice_xy_df_for_subsamples(self, y_var, the_panel=None, log_y=False):
d = self._slice_subsamples_dict()
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in d.items():
res[name1] = dict.fromkeys(content1.keys())
for name2, df in content1.items():
var_list = self._get_xy_var_list(name1=name1, name2=name2, y_var=y_var, the_panel=the_panel)
if log_y is False:
res[name1][name2] = df[var_list]
else:
df2 = df[var_list]
if the_panel is None:
for i in self.all_panels:
df2['Log' + y_var + '_' + i] = np.log2(df2[y_var + '_' + i] + 1)
df2.drop([y_var + '_' + i], axis=1, inplace=True)
else:
df2['Log' + y_var + '_' + the_panel] = np.log2(df2[y_var + '_' + the_panel] + 1)
df2.drop([y_var + '_' + the_panel], axis=1, inplace=True)
res[name1][name2] = df2
return res
def _slice_subsamples_dict(self):
"""
Slice self.cdf into a nested dictionary of subsample dataframes keyed by name1 (Leaders/Non-leaders) and name2 (full/category).
:return: dict of dicts of pandas DataFrames
"""
df = self.cdf.copy(deep=True)
d = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
d[name1] = dict.fromkeys(content1)
df2 = df.loc[df[name1]==1]
for name2 in content1:
if name2 == 'full':
d[name1][name2] = df2
else:
d[name1][name2] = df2.loc[df2[name2]==1]
return d
def _cross_section_reg_get_xy_var_list(self, name1, name2, y_var, the_panel):
"""
:param y_var: 'LogWNImputedprice','LogImputedminInstalls','offersIAPTrue','containsAdsTrue'
:return:
"""
time_invar_controls = ['size', 'DaysSinceReleased', 'contentRatingAdult']
x_var = [name1 + '_' + name2 + '_NicheDummy']
time_var_controls = ['Imputedscore_' + the_panel,
'ZScoreImputedreviews_' + the_panel]
y_var = [y_var + '_' + the_panel]
all_vars = y_var + x_var + time_invar_controls + time_var_controls
print(name1, name2, the_panel)
print('cross section reg x and y variables are :')
print(all_vars)
return all_vars
def _panel_reg_get_xy_var_list(self, name1, name2, y_var):
time_invar_controls = ['size', 'DaysSinceReleased', 'contentRatingAdult']
x_var = [name1 + '_' + name2 + '_NicheDummy']
time_var_x_vars = [name1 + '_' + name2 + '_PostXNicheDummy_' + i for i in self.all_panels] + \
['PostDummy_' + i for i in self.all_panels]
time_var_controls = ['DeMeanedImputedscore_' + i for i in self.all_panels] + \
['DeMeanedZScoreImputedreviews_' + i for i in self.all_panels]
y_var = [y_var + '_' + i for i in self.all_panels]
all_vars = y_var + x_var + time_var_x_vars + time_invar_controls + time_var_controls
print(name1, name2)
print('panel reg x and y variables are :')
print(all_vars)
return all_vars
def _cross_section_regression(self, y_var, df, the_panel):
"""
https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.RegressionResults.html#statsmodels.regression.linear_model.RegressionResults
#https://www.statsmodels.org/stable/rlm.html
https://stackoverflow.com/questions/30553838/getting-statsmodels-to-use-heteroskedasticity-corrected-standard-errors-in-coeff
source code for HC0, HC1, HC2, and HC3, white and Mackinnon
https://www.statsmodels.org/dev/_modules/statsmodels/regression/linear_model.html
https://timeseriesreasoning.com/contents/zero-inflated-poisson-regression-model/
"""
# check the correlation among variables
# dfcorr = df.corr(method='pearson').round(2)
# print('The correlation table of the cross section regression dataframe is:')
# print(dfcorr)
# print()
all_vars = df.columns.values.tolist()
# y_var is a string without panel substring
for i in all_vars:
if y_var in i:
all_vars.remove(i)
independents_df = df[all_vars]
X = sm.add_constant(independents_df)
y = df[[y_var + '_' + the_panel]]
num_dep_var_unique_values = y.nunique().squeeze()
print(y_var, 'contains', str(num_dep_var_unique_values), 'unique values.')
# I found that for the leaders medical category group y contains only zeros, so OLS does not apply
# generally, price is predominantly zeros, so use zero-inflated regression instead
if y_var == 'LogImputedprice':
print(y_var, ' -- price is predominantly zero, fit a zero-inflated Poisson model instead of OLS -- ')
model = sm.ZeroInflatedPoisson(endog=y, exog=X, exog_infl=X, inflation='logit')
results = model.fit()
else:
model = sm.OLS(y, X)
results = model.fit(cov_type='HC3')
return results
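# Note / sketch: the OLS branch above reports heteroskedasticity-robust (HC3) standard
# errors. A minimal standalone analogue with statsmodels (column names are hypothetical
# illustrations, not the exact regressors built above) would be:
#
#     import statsmodels.api as sm
#     X = sm.add_constant(df[['NicheDummy', 'size']])
#     res = sm.OLS(df['LogWNImputedprice_201907'], X).fit(cov_type='HC3')
#     res.params, res.pvalues, res.conf_int(alpha=0.05)   # what the class extracts later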
def _panel_reg_pooled_ols(self,
y_var, df):
"""
Internal function
return a dictionary containing all different type of panel reg results
I will not run fixed effects model here because they will drop time-invariant variables.
In addition, I just wanted to check whether for the time variant variables, the demeaned time variant variables
will have the same coefficient in POOLED OLS as the time variant variables in FE.
"""
all_vars = df.columns.values.tolist()
# y_var is a string without panel substring
for i in all_vars:
if y_var in i:
all_vars.remove(i)
independents_df = df[all_vars]
X = sm.add_constant(independents_df)
y = df[[y_var]]
# check if there is any variability in Y variable
# for example, leaders category Medical LogImputedprice has zeros in all its columns
num_dep_var_unique_values = y.nunique().squeeze()
if num_dep_var_unique_values == 1:
print(y_var, ' -- The dependent variable has no variation in it, skip this PANEL regression -- ')
return None
else:
# https://bashtage.github.io/linearmodels/panel/panel/linearmodels.panel.model.PanelOLS.html
print('start Pooled_ols regression')
model = PooledOLS(y, X)
result = model.fit(cov_type='clustered', cluster_entity=True)
return result
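# Sketch of what PooledOLS expects (illustrative): the dataframe passed in must carry a
# two-level (entity, time) index -- here ('index', 'panel') -- which is exactly what the
# wide_to_long reshaping in _reg_for_all_subsamples_for_single_y_var produces, e.g.
#
#     ldf = ldf.reset_index().set_index(['index', 'panel'])
#     result = PooledOLS(ldf[['LogImputedminInstalls']], sm.add_constant(X)).fit(
#         cov_type='clustered', cluster_entity=True)   # app-level clustered SEs
#
# where X stands for the independent-variable columns of ldf (hypothetical shorthand).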
def _reg_for_all_subsamples_for_single_y_var(self, reg_type, y_var):
data = self._slice_subsamples_dict()
if reg_type == 'cross_section_ols':
reg_results = dict.fromkeys(self.all_panels)
for i in self.all_panels:
reg_results[i] = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
reg_results[i][name1] = dict.fromkeys(content1)
for name2 in content1:
allvars = self._cross_section_reg_get_xy_var_list(
name1=name1,
name2=name2,
y_var=y_var,
the_panel=i)
df = data[name1][name2][allvars]
print(name1, name2, 'Cross Section Regression -- First Check Correlations')
reg_results[i][name1][name2] = self._cross_section_regression(
y_var=y_var,
df=df,
the_panel=i)
for i in self.all_panels:
self._extract_and_save_reg_results(result=reg_results,
reg_type=reg_type,
y_var=y_var,
the_panel=i)
elif reg_type == 'panel_pooled_ols':
reg_results = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
reg_results[name1] = dict.fromkeys(content1)
for name2 in content1:
allvars = self._panel_reg_get_xy_var_list(
name1=name1,
name2=name2,
y_var=y_var)
# ---------- convert to long for panel regression --------------------
df = data[name1][name2][allvars]
stubnames = [name1 + '_' + name2 + '_PostXNicheDummy', 'PostDummy',
y_var, 'DeMeanedImputedscore', 'DeMeanedZScoreImputedreviews']
df = df.reset_index()
ldf = pd.wide_to_long(
df,
stubnames=stubnames,
i=['index'],
j="panel",
sep='_').reset_index()
ldf["panel"] = pd.to_datetime(ldf["panel"], format='%Y%m')
ldf = ldf.sort_values(by=["index", "panel"]).set_index('index')
ldf = ldf.reset_index().set_index(['index', 'panel'])
reg_results[name1][name2] = self._panel_reg_pooled_ols(y_var=y_var, df=ldf)
self._extract_and_save_reg_results(result=reg_results,
reg_type=reg_type,
y_var=y_var)
else:
reg_results = {}
return reg_results
def reg_for_all_subsamples_for_all_y_vars(self, reg_type):
res = dict.fromkeys(self.all_y_reg_vars)
for y in self.all_y_reg_vars:
res[y] = self._reg_for_all_subsamples_for_single_y_var(reg_type=reg_type, y_var=y)
self.reg_results = res
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _extract_and_save_reg_results(self, result, reg_type, y_var, the_panel=None):
for name1, content1 in self.ssnames.items():
for name2 in content1:
# ---------- specify the rows to extract ---------------
index_to_extract = {
'cross_section_ols': ['const', name1 + '_' + name2 + '_NicheDummy'],
'panel_pooled_ols': [
'const',
name1 + '_' + name2 + '_NicheDummy',
'PostDummy',
name1 + '_' + name2 + '_PostXNicheDummy']
}
# ---------- get the coefficients ----------------------
if reg_type == 'cross_section_ols':
x = result[the_panel][name1][name2].params
else:
x = result[name1][name2].params
x = x.to_frame()
x.columns = ['parameter']
y = x.loc[index_to_extract[reg_type]]
# ---------- get the pvalues ---------------------------
if reg_type == 'cross_section_ols':
z1 = result[the_panel][name1][name2].pvalues
else:
z1 = result[name1][name2].pvalues
z1 = z1.to_frame()
z1.columns = ['pvalue']
z2 = z1.loc[index_to_extract[reg_type]]
y2 = y.join(z2, how='inner')
y2 = y2.round(3)
if the_panel is None:
filename = y_var + '_' + name1 + '_' + name2 + '_' + reg_type + '.csv'
else:
filename = y_var + '_' + name1 + '_' + name2 + '_' + reg_type + '_' + the_panel + '.csv'
y2.to_csv(self.des_stats_root / self.name1_path_keywords[name1] / 'reg_results_tables' / filename)
print(name1, name2, 'Reg results are saved in the reg_results_tables folder')
def _create_cross_section_reg_results_df_for_parallel_trend_beta_graph(self, alpha):
"""
uses the cross-section OLS results stored in self.reg_results (produced via self._cross_section_regression())
alpha = 0.05 gives 95% confidence intervals for the coefficients
"""
# all dependant variables in one dictionary
res_results = dict.fromkeys(self.all_y_reg_vars)
# all subsamples are hue in the same graph
for y_var in self.all_y_reg_vars:
res_results[y_var] = self.reg_results[y_var]
# since every reg result is one row in dataframe
res_df = dict.fromkeys(self.all_y_reg_vars)
for y_var, panels in res_results.items():
# order in lists are persistent (unlike sets or dictionaries)
panel_content = []
sub_samples_content = []
beta_nichedummy_content = []
ci_lower = []
ci_upper = []
for panel, subsamples in panels.items():
for name1, content1 in subsamples.items():
for name2, reg_result in content1.items():
panel_content.append(panel)
sub_samples_content.append(name1 + '_' + name2)
nichedummy = name1 + '_' + name2 + '_NicheDummy'
beta_nichedummy_content.append(reg_result.params[nichedummy])
ci_lower.append(reg_result.conf_int(alpha=alpha).loc[nichedummy, 0])
ci_upper.append(reg_result.conf_int(alpha=alpha).loc[nichedummy, 1])
d = {'panel': panel_content,
'sub_samples': sub_samples_content,
'beta_nichedummy': beta_nichedummy_content,
'ci_lower': ci_lower,
'ci_upper': ci_upper}
df = pd.DataFrame(data=d)
# create error bars (positive distance away from beta) for easier ax.errorbar graphing
df['lower_error'] = df['beta_nichedummy'] - df['ci_lower']
df['upper_error'] = df['ci_upper'] - df['beta_nichedummy']
# sort by panels
df["panel"] = pd.to_datetime(df["panel"], format='%Y%m')
df["panel"] = df["panel"].dt.strftime('%Y-%m')
df = df.sort_values(by=["panel"])
res_df[y_var] = df
return res_df
def _put_reg_results_into_pandas_for_single_y_var(self, reg_type, y_var, the_panel=None):
"""
:param reg_type: 'cross_section_ols' or 'panel_pooled_ols'
:param y_var: one of ['LogWNImputedprice', 'LogImputedminInstalls', 'offersIAPTrue', 'containsAdsTrue']
Reads self.reg_results, which stores the fitted results objects returned by
self._reg_for_all_subsamples_for_single_y_var() (PanelResults objects for the panel regressions).
:return:
"""
# ============= 1. extract results info and put them into dicts ==================
params_pvalues_dict = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
params_pvalues_dict[name1] = dict.fromkeys(content1)
for name2 in content1:
# ---------- specify the rows to extract ---------------
index_to_extract = {
'cross_section_ols': ['const', name1 + '_' + name2 + '_NicheDummy'],
'panel_pooled_ols': [
'const',
name1 + '_' + name2 + '_NicheDummy',
'PostDummy',
name1 + '_' + name2 + '_PostXNicheDummy']
}
# ---------- get the coefficients ----------------------
if reg_type == 'cross_section_ols':
x = self.reg_results[y_var][the_panel][name1][name2].params
else:
x = self.reg_results[y_var][name1][name2].params
x = x.to_frame()
x.columns = ['parameter']
y = x.loc[index_to_extract[reg_type]]
# ---------- get the pvalues ---------------------------
if reg_type == 'cross_section_ols':
z1 = self.reg_results[y_var][the_panel][name1][name2].pvalues
else:
z1 = self.reg_results[y_var][name1][name2].pvalues
z1 = z1.to_frame()
z1.columns = ['pvalue']
z2 = z1.loc[index_to_extract[reg_type]]
def _assign_asterisk(v):
if 0.05 < v <= 0.1:
return '*'
elif 0.01 < v <= 0.05:
return '**'
elif v <= 0.01:
return '***'
else:
return ''
z2['asterisk'] = z2['pvalue'].apply(lambda x: _assign_asterisk(x))
y2 = y.join(z2, how='inner')
y2['parameter'] = y2['parameter'].round(3).astype(str)
y2['parameter'] = y2['parameter'] + y2['asterisk']
y2.rename(index={'const': 'Constant',
name1 + '_' + name2 + '_NicheDummy': 'Niche',
'PostDummy': 'Post',
name1 + '_' + name2 + '_PostXNicheDummy': 'PostNiche'},
inplace=True)
y2 = y2.reset_index()
y2.drop(columns=['pvalue', 'asterisk'], inplace=True)
y2.insert(0, 'Samples', [name1 + '_' + name2] * len(y2.index))
y2['Samples'] = y2['Samples'].apply(lambda x: self.name12_reg_table_names[x] if x in self.name12_reg_table_names.keys() else 'None')
y2.rename(columns={'index': 'Independent Vars',
'parameter': self.dep_vars_reg_table_names[y_var]},
inplace=True)
params_pvalues_dict[name1][name2] = y2
# ========= concatenate dataframes into a single dataframe for each name1 ==========
res = dict.fromkeys(params_pvalues_dict.keys())
for name1, content1 in params_pvalues_dict.items():
df_list = []
for name12, df in content1.items():
df_list.append(df)
adf = functools.reduce(lambda a, b: a.append(b), df_list)
res[name1] = adf
return res
def put_reg_results_into_pandas_for_all_y_var(self, reg_type, the_panel=None):
res1 = dict.fromkeys(self.all_y_reg_vars)
if reg_type == 'cross_section_ols':
for y in self.all_y_reg_vars:
res1[y] = self._put_reg_results_into_pandas_for_single_y_var(reg_type=reg_type,
y_var=y,
the_panel=the_panel)
else:
for y in self.all_y_reg_vars:
res1[y] = self._put_reg_results_into_pandas_for_single_y_var(reg_type=reg_type, y_var=y)
res2 = dict.fromkeys(self.ssnames.keys())
for name1 in res2.keys():
df_list = []
for y in self.all_y_reg_vars:
df_list.append(res1[y][name1])
adf = functools.reduce(lambda a, b: a.merge(b, how='inner',
on=['Samples', 'Independent Vars']),
df_list)
print(adf)
filename = name1 + '_' + reg_type + '_reg_results.csv'
adf.to_csv(self.des_stats_root / self.name1_path_keywords[name1] / 'reg_tables_ready_for_latex' / filename)
res2[name1] = adf
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def graph_numApps_per_text_cluster(self):
"""
The x-axis is the rank of each text cluster (for example, with 250 text clusters we order them from 0 to 249,
where the 0th cluster contains the largest number of apps and the count decreases as the rank increases);
the y-axis is the number of apps inside each cluster.
In the second meeting with Leah we decided to abandon this graph because there are too many clusters and
the bars crowd together toward the right of the graph.
"""
d = self._numApps_per_cluster()
for name1, content1 in d.items():
for name2, content2 in content1.items():
df3 = content2.reset_index()
df3.columns = ['cluster_labels', 'Apps Count']
# -------------- plot ----------------------------------------------------------------
fig, ax = plt.subplots()
# color the top_n bars
# after sorting in descending order, the first n ranked clusters (n = broad_niche_cutoff) are broad
color = ['red'] * self.broad_niche_cutoff[name1][name2]
# and the rest of all clusters are niche
rest = len(df3.index) - self.broad_niche_cutoff[name1][name2]
color.extend(['blue'] * rest)
df3.plot.bar( x='cluster_labels',
xlabel='Text Clusters',
y='Apps Count',
ylabel='Apps Count',
ax=ax,
color=color)
# customize legend
BRA = mpatches.Patch(color='red', label='broad apps')
NIA = mpatches.Patch(color='blue', label='niche apps')
ax.legend(handles=[BRA, NIA], loc='upper right')
ax.axes.xaxis.set_ticks([])
ax.yaxis.set_ticks_position('right')
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.grid(True)
# label the top n clusters
df4 = df3.iloc[:self.broad_niche_cutoff[name1][name2], ]
for index, row in df4.iterrows():
value = round(row['Apps Count'])
ax.annotate(value,
(index, value),
                                xytext=(0, 0.1),  # small offset (in points) above the annotated point
textcoords='offset points')
plt.xlabel("Text Clusters")
plt.ylabel('Apps Count')
# ------------ set title and save ----------------------------------------
self._set_title_and_save_graphs(fig=fig,
file_keywords='numApps_count',
name1=name1,
name2=name2,
# graph_title='Histogram of Apps Count In Each Text Cluster',
relevant_folder_name = 'numApps_per_text_cluster')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def graph_numClusters_per_cluster_size_bin(self, combine_clusters):
res = self._numClusters_per_cluster_size_bin(combine_clusters)
for name1, content1 in res.items():
for name2, dfres in content1.items():
dfres.reset_index(inplace=True)
dfres.columns = ['cluster_size_bin', 'Clusters Count']
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)
dfres.plot.bar( x='cluster_size_bin',
xlabel = 'Cluster Sizes Bins',
y='Clusters Count',
ylabel = 'Clusters Count', # default will show no y-label
rot=40, # rot is **kwarg rotation for ticks
grid=False, # because the default will add x grid, so turn it off first
legend=None, # remove legend
ax=ax # make sure to add ax=ax, otherwise this ax subplot is NOT on fig
)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
                ax.yaxis.grid()  # pandas' grid argument is only True/False, so add the y-axis grid manually here
# ------------ set title and save ----------------------------------------
self._set_title_and_save_graphs(fig=fig,
file_keywords='numClusters_count',
name1=name1,
name2=name2,
# graph_title='Histogram of Clusters In Each Cluster Size Bin',
relevant_folder_name='numClusters_per_cluster_size_bin')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def graph_numApps_per_cluster_size_bin(self, combine_clusters):
res = self._numApps_per_cluster_size_bin(combine_clusters)
for name1, content1 in res.items():
for name2, dfres in content1.items():
dfres.reset_index(inplace=True)
dfres.columns = ['cluster_size_bin', 'numApps_in_cluster_size_bin']
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)
dfres.plot.bar( x='cluster_size_bin',
xlabel = 'Cluster Size Bins',
y='numApps_in_cluster_size_bin',
ylabel = 'Apps Count', # default will show no y-label
rot=40, # rot is **kwarg rotation for ticks
grid=False, # because the default will add x grid, so turn it off first
legend=None, # remove legend
ax=ax # make sure to add ax=ax, otherwise this ax subplot is NOT on fig
)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
                ax.yaxis.grid()  # pandas' grid argument is only True/False, so add the y-axis grid manually here
# ------------ set title and save ----------------------------------------
self._set_title_and_save_graphs(fig=fig,
file_keywords='numApps_per_cluster_size_bin',
name1=name1,
name2=name2,
# graph_title='Histogram of Apps Count In Each Cluster Size Bin',
relevant_folder_name='numApps_per_cluster_size_bin')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _groupby_subsample_dfs_by_nichedummy(self):
d = self._slice_subsamples_dict()
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in d.items():
res[name1] = dict.fromkeys(content1.keys())
for name2, df in content1.items():
niche_dummy = name1 + '_' + name2 + '_NicheDummy'
df2 = df.groupby([niche_dummy]).size().to_frame()
df2.rename(columns={0: name1 + '_' + name2}, index={0: 'Broad Apps', 1: 'Niche Apps'}, inplace=True)
res[name1][name2] = df2
return res
def _combine_name2s_into_single_df(self, name12_list, d):
"""
        :param name12_list: such as ['full_full', 'minInstalls_Tier1', 'minInstalls_Tier2', 'minInstalls_Tier3']
:param d: the dictionary of single subsample df containing stats
:return:
"""
df_list = []
for name1, content1 in d.items():
for name2, df in content1.items():
name12 = name1 + '_' + name2
if name12 in name12_list:
df_list.append(df)
df2 = functools.reduce(lambda a, b: a.join(b, how='inner'), df_list)
l = df2.columns.tolist()
str_to_replace = {'Non-leaders': '',
'Leaders': '',
'category': '',
'_': ' '}
for col in l:
new_col = col
for k, v in str_to_replace.items():
new_col = new_col.replace(k, v)
new_col = new_col.title()
df2.rename(columns={col: new_col}, inplace=True)
df2.loc["Total"] = df2.sum(axis=0)
df2 = df2.sort_values(by='Total', axis=1, ascending=False)
df2 = df2.drop(labels='Total')
df2 = df2.T
return df2
def niche_by_subsamples_bar_graph(self, name1=None):
# each sub-sample is a horizontal bar in a single graph
fig, ax = plt.subplots(figsize=(8, 5))
fig.subplots_adjust(left=0.2)
# -------------------------------------------------------------------------
res = self._groupby_subsample_dfs_by_nichedummy()
df = self._combine_name2s_into_single_df(name12_list=self.graph_name1_ssnames[name1],
d=res)
f_name = name1 + '_niche_by_subsamples_bar_graph.csv'
if name1 == 'Leaders':
q = self.des_stats_leaders_tables / f_name
else:
q = self.des_stats_non_leaders_tables / f_name
df.to_csv(q)
# -------------------------------------------------------------------------
df.plot.barh(stacked=True,
color={"Broad Apps": "orangered",
"Niche Apps": "lightsalmon"},
ax=ax)
ax.set_ylabel('Sub-samples')
ax.set_yticklabels(ax.get_yticklabels())
ax.set_xlabel('Apps Count')
ax.xaxis.grid()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# graph_title = self.initial_panel + ' ' + self.graph_name1_titles[name1] + \
# '\n Apps Count by Niche and Broad Types'
# ax.set_title(graph_title)
ax.legend()
# ------------------ save file -----------------------------------------------------------------
self._set_title_and_save_graphs(fig=fig,
name1=name1,
file_keywords=self.graph_name1_titles[name1].lower().replace(' ', '_'),
relevant_folder_name='nichedummy_count_by_subgroup')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _prepare_pricing_vars_for_graph_group_by_var(self,
group_by_var,
the_panel=None):
"""
group_by_var could by either "NicheDummy" or "cluster_size_bin"
the dataframe (self.cdf) is after the function combine_app_level_text_cluster_stats_with_df
"""
key_vars = ['Imputedprice',
'LogImputedprice',
                    # use this for regression and descriptive stats because uniform white noise was added to avoid zero prices
'LogWNImputedprice',
'ImputedminInstalls',
'LogImputedminInstalls',
'offersIAPTrue',
'containsAdsTrue']
if the_panel is not None:
selected_vars = [i + '_' + the_panel for i in key_vars]
else:
selected_vars = [i + '_' + j for j in self.all_panels for i in key_vars]
d = self._slice_subsamples_dict()
res12 = dict.fromkeys(self.ssnames.keys())
res34 = dict.fromkeys(self.ssnames.keys())
for name1, content1 in d.items():
res12[name1] = dict.fromkeys(content1.keys())
res34[name1] = dict.fromkeys(content1.keys())
for name2, df in content1.items():
# ---- prepare regular df with log transformed imputedprice and imputed mininstalls --------
text_label_var = name1 + '_' + name2 + '_kmeans_labels'
numApps_in_cluster = name1 + '_' + name2 + '_numApps_in_cluster'
group_by_var_name = name1 + '_' + name2 + '_' + group_by_var
# ------------------------------------------------------------------------------------------
svars = selected_vars + [text_label_var,
group_by_var_name,
numApps_in_cluster]
df2 = df[svars]
# change niche 0 1 to Broad and Niche for clearer table and graphing
if group_by_var == 'NicheDummy':
df2.loc[df2[group_by_var_name] == 1, group_by_var_name] = 'Niche'
df2.loc[df2[group_by_var_name] == 0, group_by_var_name] = 'Broad'
if the_panel is not None:
res12[name1][name2] = df2
else:
# ---------- when no panel is specified, you will need the long form ----------------------
df2 = df2.reset_index()
ldf = pd.wide_to_long(
df2,
stubnames=key_vars,
i=['index'],
j="panel",
sep='_').reset_index()
ldf["panel"] = pd.to_datetime(ldf["panel"], format='%Y%m')
ldf["panel"] = ldf["panel"].dt.strftime('%Y-%m')
ldf = ldf.sort_values(by=["index", "panel"]).set_index('index')
res12[name1][name2] = ldf
# ------ prepare df consisting of percentage True in each text cluster size bin for offersIAP and containsAds ------
if the_panel is not None:
panel_var_list = ['offersIAPTrue_' + the_panel, 'containsAdsTrue_' + the_panel]
panel_value_var_list = ['TRUE_offersIAPTrue_' + the_panel, 'TRUE_containsAdsTrue_' + the_panel]
else:
panel_var_list = ['offersIAPTrue_' + i for i in self.all_panels] + \
['containsAdsTrue_' + i for i in self.all_panels]
panel_value_var_list = ['TRUE_offersIAPTrue_' + i for i in self.all_panels] + \
['TRUE_containsAdsTrue_' + i for i in self.all_panels]
# calculate the percentage True
df_list = []
for var in panel_var_list:
df3 = pd.crosstab( index=df2[group_by_var_name],
columns=[df2[var]],
margins=True)
# for cases where only column 1 or column 0 exist for a sub text cluster or niche dummy group
if 1 not in df3.columns:
print(name1, name2, the_panel, var, 'column 1 does not exist.')
df3[1] = 0
print('created column 1 with zeros. ')
if 0 not in df3.columns:
print(name1, name2, the_panel, var, 'column 0 does not exist.')
df3[0] = 0
print('created column 0 with zeros. ')
df3['TRUE_' + var] = df3[1] / df3['All'] * 100
df3['FALSE_' + var] = df3[0] / df3['All'] * 100
df3['TOTAL_' + var] = df3['TRUE_' + var] + df3['FALSE_' + var]
df_list.append(df3[['TRUE_' + var]])
df4 = functools.reduce(lambda a, b: a.join(b, how='inner'), df_list)
                    df4['TOTAL'] = 100  # text cluster groups that do not exist are not in the rows, so the TOTAL percentage is 100
df4.drop(index='All', inplace=True)
total = df2.groupby(group_by_var_name)[var].count().to_frame()
total.rename(columns={var: 'Total_Count'}, inplace=True)
df5 = total.join(df4, how='left').fillna(0)
df5.drop(columns='Total_Count', inplace=True)
df5.reset_index(inplace=True)
if the_panel is not None:
# ------- reshape to have seaborn hues (only for cross section descriptive stats) --------------------
                        # convert to long form to have hue for different dependent variables
df6 = pd.melt(df5,
id_vars=[group_by_var_name, "TOTAL"],
value_vars=panel_value_var_list)
df6.rename(columns={'value': 'TRUE', 'variable': 'dep_var'}, inplace=True)
df6['dep_var'] = df6['dep_var'].str.replace('TRUE_', '', regex=False)
res34[name1][name2] = df6
else:
# convert to long to have hue for different niche or non-niche dummies
ldf = pd.wide_to_long(
df5,
stubnames=['TRUE_offersIAPTrue', 'TRUE_containsAdsTrue'],
i=[group_by_var_name],
j="panel",
sep='_').reset_index()
ldf["panel"] = pd.to_datetime(ldf["panel"], format='%Y%m')
ldf["panel"] = ldf["panel"].dt.strftime('%Y-%m')
ldf = ldf.sort_values(by=["panel"])
res34[name1][name2] = ldf
return res12, res34
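    # Illustrative sketch (toy data, not part of the original class) of how
    # pd.wide_to_long reshapes the panel columns used above: a column such as
    # 'Imputedprice_201907' is split into the stub 'Imputedprice' plus a 'panel'
    # suffix, producing one row per (index, panel) pair.
    #
    #   wide = pd.DataFrame({'index': [0, 1],
    #                        'Imputedprice_201907': [1.0, 2.0],
    #                        'Imputedprice_201908': [1.5, 2.5]})
    #   long = pd.wide_to_long(wide, stubnames=['Imputedprice'],
    #                          i=['index'], j='panel', sep='_').reset_index()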
def graph_histogram_pricing_vars_by_niche(self, name1, the_panel):
res12, res34 = self._prepare_pricing_vars_for_graph_group_by_var(
group_by_var='NicheDummy',
the_panel=the_panel)
key_vars = ['LogImputedprice', 'Imputedprice', 'LogWNImputedprice',
'LogImputedminInstalls', 'ImputedminInstalls']
# --------------------------------------- graph -------------------------------------------------
for i in range(len(key_vars)):
fig, ax = plt.subplots(nrows=2,
ncols=3,
figsize=(15, 10),
sharey='row',
sharex='col')
fig.subplots_adjust(bottom=0.2)
name2_l = self.ssnames[name1] # for df names name2 only
name12_l = self.graph_name1_ssnames[name1] # for column names name1 + name2
for j in range(len(name2_l)):
sns.set(style="whitegrid")
sns.despine(right=True, top=True)
sns.histplot(data=res12[name1][name2_l[j]],
x=key_vars[i] + "_" + the_panel,
hue=name12_l[j] + '_NicheDummy',
ax=ax.flat[j])
sns.despine(right=True, top=True)
graph_title = self.name12_graph_title_dict[name12_l[j]]
ax.flat[j].set_title(graph_title)
ax.flat[j].set_ylabel(self.graph_dep_vars_ylabels[key_vars[i]])
ax.flat[j].xaxis.set_visible(True)
ax.flat[j].legend().set_visible(False)
fig.legend(labels=['Niche App : Yes', 'Niche App : No'],
loc='lower right', ncol=2)
# ------------ set title and save ---------------------------------------------
self._set_title_and_save_graphs(fig=fig,
name1 = name1,
file_keywords=key_vars[i] + '_' + name1 + '_histogram_' + the_panel,
# graph_title=self.graph_name1_titles[name1] + \
# ' Cross Section Histogram of \n' + \
# self.graph_dep_vars_titles[key_vars[i]] + the_panel,
relevant_folder_name='pricing_vars_stats')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_df=self.cdf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def table_descriptive_stats_pricing_vars(self, the_panel):
"""
        This table is the data version of graph_descriptive_stats_pricing_vars, but puts
        all combos into a single table for each panel.
"""
for groupby_var in ['cluster_size_bin', 'NicheDummy']:
res12, res34 = self._prepare_pricing_vars_for_graph_group_by_var(
group_by_var=groupby_var,
the_panel=the_panel)
total_df = []
total_keys = []
for name1, value1 in res12.items():
ldf = []
keys_ldf = []
for name2, value2 in value1.items():
groupby_var2 = name1 + '_' + name2 + '_' + groupby_var
df = value2.copy()
                    # --------- cluster size depends on whether the combine_clusters (text cluster combination) option was used --------------------
df2 = df[['LogWNImputedprice_'+ the_panel,
'LogImputedminInstalls_'+ the_panel,
'offersIAPTrue_'+ the_panel,
'containsAdsTrue_'+ the_panel,
groupby_var2]].groupby(groupby_var2).describe()
ldf.append(df2)
keys_ldf.append(name2)
                df4 = pd.concat(ldf, keys=keys_ldf)
from __future__ import division
'''
NeuroLearn Statistics Tools
===========================
Tools to help with statistical analyses.
'''
__all__ = ['pearson',
'zscore',
'fdr',
'holm_bonf',
'threshold',
'multi_threshold',
'winsorize',
'trim',
'calc_bpm',
'downsample',
'upsample',
'fisher_r_to_z',
'one_sample_permutation',
'two_sample_permutation',
'correlation_permutation',
'matrix_permutation',
'jackknife_permutation',
'make_cosine_basis',
'summarize_bootstrap',
'regress',
'procrustes',
'procrustes_distance',
'align',
'find_spikes',
'correlation',
'distance_correlation',
'transform_pairwise',
'double_center',
'u_center',]
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr, kendalltau, norm, ttest_1samp
from scipy.stats import t as t_dist
from scipy.spatial.distance import squareform, pdist
from copy import deepcopy
import nibabel as nib
from scipy.interpolate import interp1d
import warnings
import itertools
from joblib import Parallel, delayed
import six
from .utils import attempt_to_import, check_square_numpy_matrix
from .external.srm import SRM, DetSRM
from scipy.linalg import orthogonal_procrustes
from scipy.spatial import procrustes as procrust
from scipy.ndimage import label, generate_binary_structure
from sklearn.utils import check_random_state
from sklearn.metrics import pairwise_distances
MAX_INT = np.iinfo(np.int32).max
# Optional dependencies
sm = attempt_to_import('statsmodels.tsa.arima_model', name='sm')
def pearson(x, y):
""" Correlates row vector x with each row vector in 2D array y.
From neurosynth.stats.py - author: <NAME>
"""
data = np.vstack((x, y))
ms = data.mean(axis=1)[(slice(None, None, None), None)]
datam = data - ms
datass = np.sqrt(np.sum(datam*datam, axis=1))
# datass = np.sqrt(ss(datam, axis=1))
temp = np.dot(datam[1:], datam[0].T)
rs = temp / (datass[1:] * datass[0])
return rs
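# Illustrative usage sketch (made-up arrays): correlate one row vector with each
# row of a 2D array; the first row of y is perfectly correlated with x and the
# second is perfectly anti-correlated.
def _example_pearson_usage():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    y = np.array([[2.0, 4.0, 6.0, 8.0],
                  [4.0, 3.0, 2.0, 1.0]])
    return pearson(x, y)  # approximately array([ 1., -1.])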
def zscore(df):
""" zscore every column in a pandas dataframe or series.
Args:
df: (pd.DataFrame) Pandas DataFrame instance
Returns:
z_data: (pd.DataFrame) z-scored pandas DataFrame or series instance
"""
if isinstance(df, pd.DataFrame):
return df.apply(lambda x: (x - x.mean())/x.std())
elif isinstance(df, pd.Series):
return (df-np.mean(df))/np.std(df)
else:
raise ValueError("Data is not a Pandas DataFrame or Series instance")
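# Illustrative usage sketch (made-up data): z-score every column of a DataFrame
# so each column has mean 0 and unit (sample) standard deviation.
def _example_zscore_usage():
    df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
    return zscore(df)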
def fdr(p, q=.05):
""" Determine FDR threshold given a p value array and desired false
discovery rate q. Written by <NAME>
Args:
p: (np.array) vector of p-values (only considers non-zero p-values)
q: (float) false discovery rate level
Returns:
fdr_p: (float) p-value threshold based on independence or positive
dependence
"""
if not isinstance(p, np.ndarray):
raise ValueError('Make sure vector of p-values is a numpy array')
s = np.sort(p)
nvox = p.shape[0]
null = np.array(range(1, nvox + 1), dtype='float') * q / nvox
below = np.where(s <= null)[0]
fdr_p = s[max(below)] if len(below) else -1
return fdr_p
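# Illustrative usage sketch (made-up p-values): the returned value is the largest
# p-value p(i) satisfying p(i) <= (i/m) * q after sorting, i.e. the
# Benjamini-Hochberg cutoff; -1 means nothing survives.
def _example_fdr_usage():
    p = np.array([0.001, 0.008, 0.039, 0.041, 0.042, 0.06, 0.074, 0.205])
    return fdr(p, q=.05)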
def holm_bonf(p, alpha=.05):
""" Compute corrected p-values based on the Holm-Bonferroni method, i.e. step-down procedure applying iteratively less correction to highest p-values. A bit more conservative than fdr, but much more powerful thanvanilla bonferroni.
Args:
p: (np.array) vector of p-values
alpha: (float) alpha level
Returns:
bonf_p: (float) p-value threshold based on bonferroni
step-down procedure
"""
if not isinstance(p, np.ndarray):
raise ValueError('Make sure vector of p-values is a numpy array')
s = np.sort(p)
nvox = p.shape[0]
    null = alpha / (nvox - np.arange(1, nvox + 1) + 1)
below = np.where(s <= null)[0]
bonf_p = s[max(below)] if len(below) else -1
return bonf_p
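# Illustrative usage sketch (made-up p-values): the i-th smallest p-value is
# compared against alpha / (m - i + 1); the returned threshold is the largest
# p-value that still passes its step-down criterion, or -1 if none do.
def _example_holm_bonf_usage():
    p = np.array([0.001, 0.01, 0.03, 0.2])
    return holm_bonf(p, alpha=.05)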
def threshold(stat, p, thr=.05, return_mask=False):
""" Threshold test image by p-value from p image
Args:
stat: (Brain_Data) Brain_Data instance of arbitrary statistic metric
(e.g., beta, t, etc)
p: (Brain_Data) Brain_data instance of p-values
threshold: (float) p-value to threshold stat image
        return_mask: (bool) optionally return the thresholding mask; default False
Returns:
out: Thresholded Brain_Data instance
"""
from nltools.data import Brain_Data
if not isinstance(stat, Brain_Data):
raise ValueError('Make sure stat is a Brain_Data instance')
if not isinstance(p, Brain_Data):
raise ValueError('Make sure p is a Brain_Data instance')
# Create Mask
mask = deepcopy(p)
if thr > 0:
mask.data = (mask.data < thr).astype(int)
else:
mask.data = np.zeros(len(mask.data), dtype=int)
# Apply Threshold Mask
out = deepcopy(stat)
if np.sum(mask.data) > 0:
out = out.apply_mask(mask)
out.data = out.data.squeeze()
else:
out.data = np.zeros(len(mask.data), dtype=int)
if return_mask:
return out, mask
else:
return out
def multi_threshold(t_map, p_map, thresh):
""" Threshold test image by multiple p-value from p image
Args:
stat: (Brain_Data) Brain_Data instance of arbitrary statistic metric
(e.g., beta, t, etc)
p: (Brain_Data) Brain_data instance of p-values
threshold: (list) list of p-values to threshold stat image
Returns:
out: Thresholded Brain_Data instance
"""
from nltools.data import Brain_Data
if not isinstance(t_map, Brain_Data):
raise ValueError('Make sure stat is a Brain_Data instance')
if not isinstance(p_map, Brain_Data):
raise ValueError('Make sure p is a Brain_Data instance')
if not isinstance(thresh, list):
raise ValueError('Make sure thresh is a list of p-values')
affine = t_map.to_nifti().get_affine()
pos_out = np.zeros(t_map.to_nifti().shape)
neg_out = deepcopy(pos_out)
for thr in thresh:
t = threshold(t_map, p_map, thr=thr)
t_pos = deepcopy(t)
t_pos.data = np.zeros(len(t_pos.data))
t_neg = deepcopy(t_pos)
t_pos.data[t.data > 0] = 1
t_neg.data[t.data < 0] = 1
pos_out = pos_out+t_pos.to_nifti().get_data()
neg_out = neg_out+t_neg.to_nifti().get_data()
pos_out = pos_out + neg_out*-1
return Brain_Data(nib.Nifti1Image(pos_out, affine))
def winsorize(data, cutoff=None, replace_with_cutoff=True):
    ''' Winsorize a Pandas DataFrame or Series, replacing outliers with either the cutoff or the largest/lowest values not considered outliers
Args:
data: (pd.DataFrame, pd.Series) data to winsorize
cutoff: (dict) a dictionary with keys {'std':[low,high]} or
{'quantile':[low,high]}
replace_with_cutoff: (bool) If True, replace outliers with cutoff.
If False, replaces outliers with closest
                            existing values; (default: True)
Returns:
out: (pd.DataFrame, pd.Series) winsorized data
'''
return _transform_outliers(data, cutoff, replace_with_cutoff=replace_with_cutoff, method='winsorize')
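# Illustrative usage sketch (made-up series; behavior as documented above):
# values outside the 5th-95th percentiles are replaced by the percentile cutoffs.
def _example_winsorize_usage():
    s = pd.Series([1.0, 2.0, 3.0, 4.0, 100.0])
    return winsorize(s, cutoff={'quantile': [.05, .95]})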
def trim(data, cutoff=None):
''' Trim a Pandas DataFrame or Series by replacing outlier values with NaNs
Args:
data: (pd.DataFrame, pd.Series) data to trim
cutoff: (dict) a dictionary with keys {'std':[low,high]} or
{'quantile':[low,high]}
Returns:
out: (pd.DataFrame, pd.Series) trimmed data
'''
return _transform_outliers(data, cutoff, replace_with_cutoff=None, method='trim')
def _transform_outliers(data, cutoff, replace_with_cutoff, method):
''' This function is not exposed to user but is called by either trim
or winsorize.
Args:
data: (pd.DataFrame, pd.Series) data to transform
cutoff: (dict) a dictionary with keys {'std':[low,high]} or
{'quantile':[low,high]}
replace_with_cutoff: (bool) If True, replace outliers with cutoff.
If False, replaces outliers with closest
existing values. (default: False)
method: 'winsorize' or 'trim'
Returns:
out: (pd.DataFrame, pd.Series) transformed data
'''
df = data.copy() # To not overwrite data make a copy
def _transform_outliers_sub(data, cutoff, replace_with_cutoff, method='trim'):
if not isinstance(data, pd.Series):
raise ValueError('Make sure that you are applying winsorize to a pandas dataframe or series.')
if isinstance(cutoff, dict):
# calculate cutoff values
if 'quantile' in cutoff:
q = data.quantile(cutoff['quantile'])
elif 'std' in cutoff:
std = [data.mean()-data.std()*cutoff['std'][0], data.mean()+data.std()*cutoff['std'][1]]
                q = pd.Series(index=cutoff['std'], data=std)
import common, cost, fetchresults
import logging, sys, datetime
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import pickle
logging.basicConfig(level=logging.INFO)
#logging.basicConfig(level=logging.DEBUG)
if 1:
results = fetchresults.get_all_with_status()
pickle.dump(results, open(r'data_tmp\latestall', 'wb'))
else:
results = pickle.load(open(r'data_tmp\latestall', 'rb'))
date_all = []
compute_hours_all = []
for result in results:
duration_ix = common.make_int_array(result['duration_ix'])
duration_ecl2ix = common.make_int_array(result['duration_ecl2ix'])
nprocesses_ix = common.make_int_array(result['num_processes_ix'])
nprocesses_ix[nprocesses_ix == 0] = 1 # hack for now, this has been checked
nthreads_ix = common.make_int_array(result['num_threads_ix'])
nvcpu_ix = nthreads_ix * nprocesses_ix
compute_seconds = np.sum(duration_ix * nvcpu_ix + duration_ecl2ix)
compute_hours = compute_seconds/60**2
date_all += [result['posted']]
compute_hours_all += [compute_hours]
date = pd.to_datetime(pd.Series(date_all))
compute_hours = pd.DataFrame(compute_hours_all, index=date)
# -*- coding: utf-8 -*-
import pandas.util.testing as tm
from pandas.tseries import offsets
from pandas._libs.tslibs.frequencies import (get_rule_month,
_period_str_to_code,
INVALID_FREQ_ERR_MSG,
is_superperiod, is_subperiod)
def assert_aliases_deprecated(freq, expected, aliases):
assert isinstance(aliases, list)
assert (_period_str_to_code(freq) == expected)
for alias in aliases:
with tm.assert_raises_regex(ValueError, INVALID_FREQ_ERR_MSG):
_period_str_to_code(alias)
def test_get_rule_month():
result = get_rule_month('W')
assert (result == 'DEC')
result = get_rule_month(offsets.Week())
assert (result == 'DEC')
result = get_rule_month('D')
assert (result == 'DEC')
result = get_rule_month(offsets.Day())
assert (result == 'DEC')
result = get_rule_month('Q')
assert (result == 'DEC')
    result = get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert (result == 'DEC')
result = get_rule_month('Q-JAN')
assert (result == 'JAN')
result = get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert (result == 'JAN')
result = get_rule_month('A-DEC')
assert (result == 'DEC')
result = get_rule_month('Y-DEC')
assert (result == 'DEC')
result = get_rule_month(offsets.YearEnd())
assert (result == 'DEC')
result = get_rule_month('A-MAY')
assert (result == 'MAY')
result = get_rule_month('Y-MAY')
assert (result == 'MAY')
result = get_rule_month(offsets.YearEnd(month=5))
assert (result == 'MAY')
def test_period_str_to_code():
assert (_period_str_to_code('A') == 1000)
assert (_period_str_to_code('A-DEC') == 1000)
assert (_period_str_to_code('A-JAN') == 1001)
assert (_period_str_to_code('Y') == 1000)
assert (_period_str_to_code('Y-DEC') == 1000)
assert (_period_str_to_code('Y-JAN') == 1001)
assert (_period_str_to_code('Q') == 2000)
assert (_period_str_to_code('Q-DEC') == 2000)
assert (_period_str_to_code('Q-FEB') == 2002)
assert_aliases_deprecated("M", 3000, ["MTH", "MONTH", "MONTHLY"])
assert (_period_str_to_code('W') == 4000)
assert (_period_str_to_code('W-SUN') == 4000)
assert (_period_str_to_code('W-FRI') == 4005)
assert_aliases_deprecated("B", 5000, ["BUS", "BUSINESS",
"BUSINESSLY", "WEEKDAY"])
assert_aliases_deprecated("D", 6000, ["DAY", "DLY", "DAILY"])
assert_aliases_deprecated("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"])
assert_aliases_deprecated("T", 8000, ["minute", "MINUTE", "MINUTELY"])
assert (_period_str_to_code('Min') == 8000)
assert_aliases_deprecated("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"])
assert_aliases_deprecated("L", 10000, ["MILLISECOND", "MILLISECONDLY"])
assert (_period_str_to_code('ms') == 10000)
assert_aliases_deprecated("U", 11000, ["MICROSECOND", "MICROSECONDLY"])
assert (_period_str_to_code('US') == 11000)
assert_aliases_deprecated("N", 12000, ["NANOSECOND", "NANOSECONDLY"])
assert (_period_str_to_code('NS') == 12000)
def test_is_superperiod_subperiod():
# input validation
assert not (is_superperiod(offsets.YearEnd(), None))
assert not (is_subperiod(offsets.MonthEnd(), None))
assert not (is_superperiod(None, offsets.YearEnd()))
assert not (is_subperiod(None, offsets.MonthEnd()))
assert not (is_superperiod(None, None))
assert not (is_subperiod(None, None))
assert (is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert (is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
    assert (is_superperiod(offsets.Hour(), offsets.Minute()))
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from Helpers import find_between
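# Assumption based on how it is used below: `find_between(s, first, last)` is
# taken to return the substring of `s` between the first occurrence of `first`
# and the following occurrence of `last`, e.g.
# find_between('a[b]c', '[', ']') -> 'b'. The helper itself lives in Helpers.py.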
class CJH_Archives:
def __init__(self, repo, url=False):
self.repo = repo
self.url = url
def get_meta_data(self, object_type, page_to_start_at, maximum_pages_to_scrape):
def scrape_all_records(object_type='records', start_page=1, stop_after_pages=0, url_override=False):
"""
URL OVERRIDE MUST BE WITHOUT THE START PAGE
"""
if start_page <= 0:
print("Must start at minimum of page 1")
start_page = 1
page = start_page
else:
page = start_page
if object_type.upper() == 'RECORDS':
print("Scraping All Individual Records")
headless_url = "https://archives.cjh.org/repositories/3/objects?q[]=%2A&op[]=OR&field[]=keyword&from_year[]=&to_year[]=&limit=digital_object,archival_object&sort=title_sort%20asc&page="
base_URL = str(headless_url + str(page))
elif object_type.upper() == 'COLLECTIONS':
# page = start_page
print("Scraping Collections (Finding Aids)")
headless_url = "https://archives.cjh.org/repositories/3/resources?q[]=%2A&op[]=&field[]=title&from_year[]=&to_year[]=&limit=resource&sort=year_sort%20asc&page="
base_URL = str(headless_url + str(page))
elif object_type.upper() == 'DIGITAL':
# page = start_page
print("Scraping Digital Records")
headless_url = "https://archives.cjh.org/repositories/3/objects?q[]=%2A&op[]=OR&field[]=keyword&from_year[]=&to_year[]=&limit=digital_object&sort=year_sort%20asc&page="
base_URL = str(headless_url + str(page))
elif object_type.upper() == 'CUSTOM':
headless_url = url_override
base_URL = str(headless_url + str(page))
def scrape_record(name, link, web_page, object_type):
# print(web_page, link)
# (.+?)
# meta_dict = find_between(str(i),'<script type="application/ld+json">',' </script>' )
# meta_dict = re.findall(r'>(', str(web_page))
title = (web_page.title)
part_of = web_page.find_all('ul', {'class': 'breadcrumb'})
part_of = part_of[0].find_all('a')
location_tupes = []
for i in part_of:
link = (str(i).split('"')[1])
found_loc_name = (str(i).split('>')[1]).split('<')[0]
tupp = (found_loc_name, link)
location_tupes.append(tupp)
locs = (location_tupes)
subnotes = web_page.find_all('div', {'class': 'upper-record-details'})[0].text
div_data_1 = [("Name", name), ("Link", link)]
acord = web_page.find_all('div', {'class': 'acc_holder clear'})[0].text
acc_data = []
if object_type.upper() == 'RECORDS':
possible_fields_1 = [
"Scope and Contents",
"Dates",
"Language of Materials",
"Access Restrictions",
"Extent",
]
possible_fields_2 = [
"Related Names",
"Digital Material",
"Physical Storage Information",
"Repository Details",
]
elif object_type.upper() == 'COLLECTIONS':
possible_fields_1 = [
"Scope and Content Note",
"Dates",
"Creator",
"Access Restrictions",
"Use Restrictions",
"Conditions Governing Access",
"Conditions Governing Use",
"Extent",
"Language of Materials"
]
possible_fields_2 = [
"Additional Description",
"Subjects",
"Related Names",
"Finding Aid & Administrative Information",
'Physical Storage Information',
'Repository Details',
]
##subnotes
b1 = []
pc_1 = []
for i in possible_fields_1:
if i in str(subnotes):
out = True
else:
out = False
missingTuple = (i, '')
div_data_1.append(missingTuple)
pc_1.append(str(subnotes).find(i))
b1.append(out)
##accordian
b2 = []
pc_2 = []
for i in possible_fields_2:
if i in str(acord):
out = True
else:
out = False
missingTuple = (i, '')
div_data_1.append(missingTuple)
pc_2.append(str(acord).find(i))
b2.append(out)
xs = possible_fields_1
ys = b1
filtered1 = np.array(xs)[np.array(ys)]
xs = possible_fields_2
ys = b2
filtered2 = np.array(xs)[np.array(ys)]
no_emps1 = filter(lambda a: a != -1, pc_1)
no_emps2 = filter(lambda a: a != -1, pc_2)
aaa = [y for x, y in sorted(zip(no_emps1, filtered1))]
bbb = [y for x, y in sorted(zip(no_emps2, filtered2))]
indexer = 0
filtered1 = aaa
filtered2 = bbb
for i in filtered1:
first = i
try:
next = filtered1[indexer + 1]
except BaseException as e:
next = '$$$'
value = find_between(subnotes, first, next)
value = value.replace('\n', ' ').strip().replace('\t', ' ')
val = (i, value)
div_data_1.append(val)
indexer += 1
indexer = 0
for i in filtered2:
first = i
try:
                        next = filtered2[indexer + 1]
except BaseException as e:
next = '$$$'
value = find_between(acord, first, next)
value = value.replace('\n', ' ').strip().replace('\t', ' ')
val = (i, value)
div_data_1.append(val)
indexer += 1
bigList = (div_data_1)
return tuple(bigList)
URL = base_URL
web_page = BeautifulSoup(requests.get(URL, {}).text, "lxml")
pagnation = web_page.find_all('ul', {'class': 'pagination'})[0].find_all('li')
next_link = (web_page.find_all('li', {'class': 'next'})[0]).find('a', href=True)
linkky = str(next_link)
nextPage_ = str("https://archives.cjh.org" + (linkky.split('"')[1]))
pageList = []
s_pages = []
for i in pagnation:
number = str(i).split('>')[2].split('<')[0]
pageList.append((number))
test_list = []
for i in pageList:
try:
test_list.append(int(i))
except:
pass
last_page__ = (max(test_list))
__lastPage = page + stop_after_pages
page_counter = page
tupleList = []
for i in range(page, __lastPage):
row_list = []
pagez = i
print("Scraping Archive Index for Entry Links", pagez)
page_current = page_counter
URL = str(headless_url + str(i))
web_page = BeautifulSoup(requests.get(URL, {}).text, "lxml")
h3s = web_page.find_all('h3')
for i in h3s:
try:
link = ((str(i).split('href="')[1]).split('"'))[0]
name = (str(i).split('">'))[1].split("</a")[0]
data_tuple = (name, str("https://archives.cjh.org" + link), link)
tupleList.append(data_tuple)
except BaseException as e:
pass
page_counter += 1
            archIndex = pd.DataFrame.from_records(tupleList, columns=['Names', 'Link', 'Location'])
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
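# Illustrative sketch (made-up prices): realized volatility is the square root of
# the sum of squared log returns of a price series.
def _example_realized_volatility():
    prices = pd.Series([100.0, 100.5, 99.8, 100.2])
    returns = log_return(prices)          # first element is NaN and is skipped in the sum
    return realized_volatility(returns)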
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
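# Illustrative sketch (toy book row): the weighted average price weights each
# side's best quote by the size resting on the opposite side, so the WAP is
# pulled toward the side with less resting size.
def _example_compute_wap():
    book = pd.DataFrame({'bid_price1': [99.0], 'ask_price1': [101.0],
                         'bid_size1': [10.0], 'ask_size1': [30.0]})
    return compute_wap(book)  # (99*30 + 101*10) / (10 + 30) = 99.5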
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
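# Illustrative sketch (assumes the `arch` package is installed and the
# `from arch import arch_model` import above is uncommented): fit a GARCH(1, 1)
# model on a log-return series scaled by N and forecast volatility over a
# 600-second horizon.
#
#   returns = log_return(pd.Series([100.0, 100.2, 99.9, 100.1])).dropna()
#   vol = garch_fit_predict_volatility(returns, N=10000)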
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# leave only WAP for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
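# Illustrative sketch (toy arrays; assumes `sampen` from information_measures is
# available): resample the WAP onto a regular 1-second grid with nearest-neighbour
# interpolation and compute its sample entropy over the final part of the bucket.
#
#   wap = np.array([1.0, 1.01, 0.99, 1.02, 1.0])
#   seconds = np.array([0, 150, 300, 450, 599])
#   ent = entropy_from_wap(wap, seconds, last_seconds=300)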
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # Abs to take
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
return [wap_imbalance_mean,price_spread_mean,bid_spread_mean,ask_spread_mean,total_volume_mean,volume_imbalance_mean, wap_imbalance_sum,price_spread_sum,bid_spread_sum,ask_spread_sum,total_volume_sum,volume_imbalance_sum, wap_imbalance_std,price_spread_std,bid_spread_std,ask_spread_std,total_volume_std,volume_imbalance_std, wap_imbalance_max,price_spread_max,bid_spread_max,ask_spread_max,total_volume_max,volume_imbalance_max, wap_imbalance_min,price_spread_min,bid_spread_min,ask_spread_min,total_volume_min,volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
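# --- Editor's note: hedged sketch, not part of the original notebook ---
# The feature builders in this file rely on helpers (calc_wap, calc_wap2, calc_wap3,
# calc_rv_from_wap_numba, financial_metrics, ...) defined elsewhere in the notebook.
# For readability, minimal versions of the two most central helpers could look like
# this; the book column names (bid_price1, ask_price1, bid_size1, ask_size1) are
# assumptions based on the usual level-1 order-book layout of this dataset.
def calc_wap_sketch(book):
    # level-1 weighted average price: bid price weighted by ask size and vice versa
    return (book['bid_price1'] * book['ask_size1'] + book['ask_price1'] * book['bid_size1']) \
        / (book['bid_size1'] + book['ask_size1'])

def calc_rv_from_wap_sketch(wap_values):
    # realized volatility: square root of the sum of squared log returns of the WAP
    log_returns = np.diff(np.log(np.asarray(wap_values, dtype=float)))
    return np.sqrt(np.sum(log_returns ** 2))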
def computeFeatures_july(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
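# --- Editor's note: hedged sketch, not part of the original notebook ---
# The row-by-row one-hot encoding of stock_id above can be expressed as a single
# vectorized pandas call; this is only an illustrative alternative.
def encode_stock_ids_sketch(df_book_features, all_stocks_ids):
    ids = df_book_features['row_id'].str.split('-').str[0].astype(int)
    one_hot = pd.get_dummies(pd.Categorical(ids, categories=list(all_stocks_ids)))
    one_hot.index = df_book_features.index
    return pd.concat([df_book_features, one_hot], axis=1)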
def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap2(book_stock)
book_stock['mid_price'] = calc_wap3(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = | pd.DataFrame(all_time_ids_byStock,columns=['time_id']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 01:31:54 2021
@author: yoonseok
"""
import os
import pandas as pd
from tqdm import tqdm
from scipy.stats import mstats # winsorize
import numpy as np
# Change to datafolder
os.chdir(r"C:\data\car\\")
# Load the base table
df = pd.read_csv("dataframe_h1.txt")
del df["Unnamed: 0"]
df = df.dropna(subset=["8"])
# Extract the disclosure date
df["date"] = [x[0:10].replace(".","") for x in df["6"]]
# Add the year
df["year"] = [int(x[1:5]) for x in df["5"]]
# Build the carKey
carKey = []
for number in range(len(df)):
carKey.append(str(df.iloc[number,6].astype(int)) + str(df.iloc[number,17]))
key = []
for i in carKey:
key.append(int(i))
df["carKey"] = key
# Load the earnings announcement date data
df2 = pd.read_csv("car_2_earningsAccouncementDate.csv")
del df2["Unnamed: 0"]
df['dateE'] = df['carKey'].map(df2.set_index("carKey")['rcept_dt'])
df = df.dropna(subset=["dateE"])
date = []
for i in df["dateE"]: # the earnings-announcement CAR window is [-1,1], so shift the mapping date to the previous day
if str(i)[4:8] == "0201": # January 2 and March 2
i = int(str(i)[0:4] + "0131")
else:
i = int(i) -1
date.append(int(i))
df["dateE"] = date
# Build the car key
car = []
for number in range(len(df)):
car.append(str(df.iloc[number,16]) + str(df.iloc[number,6].astype(int)))
key = []
for i in car:
key.append(int(i))
df["car"] = key
# Build the car_e key
car_e = []
for number in range(len(df)):
car_e.append(str(df.iloc[number,19]) + str(df.iloc[number,6].astype(int)))
key = []
for i in car_e:
key.append(int(i))
df["car_e"] = key
# Change to the CAR working folder
os.chdir("C:\data\stockinfo\car\\") # change working folder
# Collect all computed CAR sheets
year = 1999
CAR = pd.read_csv("CAR_" + str(year) +".csv",
usecols=[2, 3, 5, 14, 15],
dtype=str)
for year in tqdm(range(0, 21)):
CAR2 = pd.read_csv("CAR_" + str(2000 + year) +".csv",
usecols=[2, 3, 5, 14, 15],
dtype=str)
CAR = pd.concat([CAR, CAR2])
CAR = CAR.sort_values(by=["0", "date"])
key = []
for i in tqdm(CAR["match"]):
try:
key.append(int(i))
except ValueError:
key.append('')
CAR["match"] = key
CAR = CAR.dropna(subset=["CAR[0,2]_it"])
CAR = CAR.replace(r'^\s*$', np.nan, regex=True)
CAR = CAR.dropna(subset=["match"])
CAR = CAR.drop_duplicates(subset=["match"])
# Process CAR
df['car_val'] = df['car'].map(CAR.set_index("match")['CAR[0,2]_it'])
df['car_e_val'] = df['car_e'].map(CAR.set_index("match")['CAR[0,2]_it'])
df = df.dropna(subset=["car_val", "car_e_val"])
# Prepare the fileLate calculation
## Load prior-year-end separate total assets
asset_prev = pd.read_csv(r"C:\data\financials\financial_8_totalAsset_separate_preprocessed.txt")
asset_prev = asset_prev.drop_duplicates(subset=["assetKey"])
## AssetKey 생성
assetKey = []
for entry in df["key"]:
key = entry[22:]
assetKey.append(key)
df["assetKey"] = assetKey
## Map prior-year-end separate total assets
df['asset_py'] = df['assetKey'].map(asset_prev.set_index("assetKey")['asset'])
df = df.dropna(subset=['asset_py'])
## Flag total assets of 2 trillion KRW or more
df["large"] = [1 if x >= 2000000000000 else 0 for x in df["asset_py"]]
# Convert similarity (SCORE^A) scores to a DataFrame
score = pd.read_csv(r"C:\data\h1.score.count.txt")
del score["Unnamed..0"]
del score["X"]
# Convert total assets to a DataFrame
asset = pd.read_csv(r"C:\data\financials\financial_1_totalAsset_preprocessed.txt")
# Convert collected audit report information to DataFrames
auditor = pd.read_csv(r"C:\data\financials\auditReport_1_auditor_preprocessed.txt")
del auditor["Unnamed: 0"]
gaap = pd.read_csv(r"C:\data\financials\auditReport_2_gaap_preprocessed.txt")
del gaap["Unnamed: 0"]
# Merge DF
result = pd.merge(df, score, how="inner", on=["key"])
result = | pd.merge(result, asset[["key", "asset"]], how="inner", on=["key"]) | pandas.merge |
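# --- Editor's note: hedged sketch, not part of the original script ---
# mstats (scipy.stats) is imported above but not used in this excerpt; a typical next
# step in this kind of CAR study is to winsorize the continuous variables at the
# 1%/99% tails before running regressions. The column names follow the dataframe
# built above.
result["car_val_w"] = mstats.winsorize(result["car_val"].astype(float), limits=[0.01, 0.01])
result["car_e_val_w"] = mstats.winsorize(result["car_e_val"].astype(float), limits=[0.01, 0.01])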
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
# the holiday moves the roll schedule up one day, since Friday is
# excluded as a day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = | pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols) | pandas.DataFrame |
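# --- Editor's note: hedged sketch, not part of the original test module ---
# The three-row transition DataFrame is rebuilt in almost every test above; a pytest
# fixture like this one could remove that duplication (illustrative only).
@pytest.fixture
def linear_transition():
    cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
    return pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
                        index=[-2, -1, 0], columns=cols)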
import logging
l = logging.getLogger("abg")
import flask
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_required, login_user, logout_user
from flask import Markup
from flask import send_file
from flask import abort
l.error("flask")
from abg_stats.extensions import login_manager
from abg_stats.public.forms import LoginForm
from abg_stats.user.forms import RegisterForm
from abg_stats.user.models import User
from abg_stats.utils import flash_errors
l.error("abg_stats")
import os
import matplotlib
matplotlib.use('agg')
l.error("matplot")
import pandas as pd
l.error("Pandas import")
import matplotlib.pyplot as plt
import numpy as np
l.error("Pandas and numpy")
# from urlparse import urlparse
from pprint import pprint as pp
from io import BytesIO
import base64
import random
import scipy.stats as stats
import scipy
from pandas_highcharts.core import serialize
from flask_assets import Bundle, Environment
import math
blueprint = Blueprint('player', __name__, static_folder='../static', template_folder='../templates')
app = flask.current_app
def build_elo_dist_chart(df):
return serialize(df, render_to="elo_stddev_chart", output_type="json", title="Compared to all players having experience over {}".format(app.config['XP_THRESHOLD']))
def build_elo_history(player_matches):
# chartdf = player_matches[['Date', 'Player ELO']]
#
# chartdf["Date"] = pd.DatetimeIndex(chartdf["Date"]).astype(int) / 1000 / 1000
# chartdf.set_index("Date", inplace=True)
matches_without_dq = player_matches[player_matches["DQ"] == False]
chartdf = matches_without_dq[['Date', 'Player ELO']].copy()
winrate_chart = matches_without_dq[["Date", "W"]].copy()
winrate_chart["wins"] = winrate_chart['W'].cumsum()
winrate_chart["dumb"] = 1
winrate_chart["count"] = winrate_chart["dumb"].cumsum()
winrate_chart["Win Rate"] = winrate_chart["wins"] / winrate_chart["count"]
winrate_chart = winrate_chart[["Date", "Win Rate"]]
chartdf["Date"] = pd.DatetimeIndex(chartdf["Date"])
chartdf["Win Rate"] = winrate_chart["Win Rate"]
chartdf.set_index("Date", inplace=True)
z = chartdf.resample('w').mean()
z = z.fillna(method='bfill')
z["Player ELO"] = z["Player ELO"].map(lambda x: round(x))
z["Win Rate"] = z["Win Rate"].map(lambda x: round(x * 100))
z.columns = ["ELO", "Win Rate"]
#pp(chartdf.index)
#grouped = pd.groupby(chartdf,by=[chartdf.index.month,chartdf.index.year])["Player ELO"].mean()
#chartdf["Player_ELO_rolling"] = pd.rolling_mean(chartdf["Player ELO"], window=5)
#rouped = chartdf[["Player_ELO_rolling"]]
return serialize(z, secondary_y = ["Win Rate"], render_to='elo_chart', output_type='json', title="ELO and win rate history")
def get_player_matches_df(matches, player_name):
player_matches = matches[(matches['player1-name'] == player_name) | (matches['player2-name'] == player_name)]
player_winner = matches[matches["winner"] == player_name].copy()
player_loser = matches[matches["loser"] == player_name].copy()
player_winner["player_elo_change"] = matches["winner_elo_change"]
player_loser["player_elo_change"] = matches["loser_elo_change"]
player_winner["player_elo"] = matches["winner_elo"]
player_loser["player_elo"] = matches["loser_elo"]
player_winner["W"] = 1
player_winner["L"] = 0
player_loser["W"] = 0
player_loser["L"] = 1
player_winner["opponent"] = player_winner["loser"]
player_loser["opponent"] = player_loser["winner"]
player_matches = | pd.concat([player_winner, player_loser]) | pandas.concat |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
from meterstick import utils
import mock
import numpy as np
import pandas as pd
from pandas import testing
from scipy import stats
import unittest
class DistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
distribution = operations.Distribution('grp', sum_x)
def test_distribution(self):
output = self.distribution.compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_normalize(self):
output = operations.Normalize('grp', self.sum_x).compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
| testing.assert_frame_equal(output, expected) | pandas.testing.assert_frame_equal |
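# --- Editor's note: hedged sketch, not part of the original test file ---
# A complementary sanity check for the two tests above: whatever the grouping
# column, the distribution values should sum to 1.
def test_distribution_sums_to_one():
    df = pd.DataFrame({'X': [1, 1, 1, 5], 'grp': ['A', 'A', 'B', 'B']})
    output = operations.Distribution('grp', metrics.Sum('X')).compute_on(df)
    assert np.isclose(output['Distribution of sum(X)'].sum(), 1.0)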
import pandas as pd
import numpy as np
file1 = '../data/STRIDE_PATIENT.xlsx'
x1 = pd.ExcelFile(file1)
stride_patient = x1.parse('Sheet1')
file2 = '../data/SURGERY.xlsx'
x2 = | pd.ExcelFile(file2) | pandas.ExcelFile |
# general
import math
import logging
import json
import os,sys
from pIMZ.regions import SpectraRegion
import random
from collections import defaultdict, Counter
import glob
import shutil, io, base64
# general package
from natsort import natsorted
import pandas as pd
import numpy as np
from numpy.ctypeslib import ndpointer
from pyimzml.ImzMLParser import ImzMLParser, browse, getionimage
import ms_peak_picker
import regex as re
# image
import skimage
from skimage import measure as sk_measure
from adjustText import adjust_text
# processing
import ctypes
import subprocess
import dill as pickle
#vis
import dabest
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
#methods
import umap
import hdbscan
import diffxpy.api as de
import anndata
from scipy import ndimage, misc, sparse
from scipy.sparse.linalg import spsolve
from scipy.spatial.distance import squareform, pdist
import scipy.cluster as spc
import scipy as sp
import sklearn as sk
from sklearn.metrics.pairwise import cosine_similarity
#web/html
import jinja2
# applications
import progressbar
class CombinedSpectra():
"""CombinedSpectra class for a combined analysis of several spectra regions.
"""
def __setlogger(self):
"""Sets up logging facilities for CombinedSpectra.
"""
self.logger = logging.getLogger('CombinedSpectra')
if len(self.logger.handlers) == 0:
self.logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
self.logger.addHandler(consoleHandler)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
consoleHandler.setFormatter(formatter)
def __init__(self, regions):
"""Initializes a CombinedSpectra object with the following attributes:
- logger (logging.Logger): Reference to the Logger object.
- regions (dict): A dictionary that has SpectraRegion objects names as keys and respective SpectraRegion objects as values. If a SpectraRegion object does not have a name attribute it will be named according to the region id.
- consensus_similarity_matrix (pandas.DataFrame): Pairwise similarity matrix between consensus spectra of all combinations of regions. Initialized with None.
- region_cluster2cluster (dict): A dictionary where every tuple (region name, region id) is mapped to its cluster id where it belongs. Initialized with None.
- region_array_scaled (dict): A dictionary where each SpectraRegion name is mapped to the respective scaled region array either using "avg" (average) or "median" method. Initialized with an empty dict.
- de_results_all (dict): Methods mapped to their differential analysis results (as pd.DataFrame). Initialized with an empty defaultdict.
Args:
regions (dict): A dictionary that maps region ids to respective SpectraRegion objects.
"""
self.regions = {}
self.consensus_similarity_matrix = None
self.region_cluster2cluster = None
self.region_array_scaled = {}
self.de_results_all = defaultdict(lambda: dict())
self.df_results_all = defaultdict(lambda: dict())
self.logger = None
self.__setlogger()
for x in regions:
addregion = regions[x]
if addregion.name == None:
addregion.name = x
self.regions[addregion.name] = regions[x]
def __get_spectra_similarity(self, vA, vB):
"""Calculates cosine similarity between two vectors of the same length.
Args:
vA (numpy.array/list): First vector.
vB (numpy.array/list): Second vector.
Returns:
float: cosine similarity.
"""
if (np.all((vA == 0)) or np.all((vB == 0))):
return 0
return np.dot(vA, vB) / (np.sqrt(np.dot(vA,vA)) * np.sqrt(np.dot(vB,vB)))
def consensus_similarity(self):
"""
Calculates consensus_similarity_matrix of CombinedSpectra object.
The resulting pandas.DataFrame is a pairwise similarity matrix between consensus spectra of all combinations of regions.
If the object was not yet scaled, it will get scaled.
"""
self.check_scaled()
allConsSpectra = {}
for regionName in self.region_array_scaled:
scaled_region = self.region_array_scaled[regionName]
region = self.regions[regionName]
regionCS = region.consensus_spectra(array=scaled_region, set_consensus=False)
for clusterid in regionCS:
allConsSpectra[(regionName, clusterid)] = regionCS[clusterid]
allRegionClusters = sorted([x for x in allConsSpectra])
distDF = pd.DataFrame(0.0, index=allRegionClusters, columns=allRegionClusters)
for i in range(0, len(allRegionClusters)):
regionI = allRegionClusters[i]
for j in range(i, len(allRegionClusters)):
regionJ = allRegionClusters[j]
specSim = self.__get_spectra_similarity(allConsSpectra[regionI], allConsSpectra[regionJ])
distDF[regionI][regionJ] = specSim
distDF[regionJ][regionI] = specSim
self.consensus_similarity_matrix = distDF
def plot_consensus_similarity(self):
"""Plots the similarity matrix represented as seaborn.heatmap.
"""
sns.heatmap(self.consensus_similarity_matrix, xticklabels=1, yticklabels=1)
plt.show()
plt.close()
def cluster_concensus_spectra(self, number_of_clusters=5):
"""Performs clustering using Ward variance minimization algorithm on similarity matrix of consensus spectra and updates region_cluster2cluster with the results. region_cluster2cluster dictionary maps every tuple (region name, region id) to its cluster id where it belongs. Additionally plots the resulting dendrogram depicting relationships of regions to each other.
Args:
number_of_clusters (int, optional): Number of desired clusters. Defaults to 5.
"""
df = self.consensus_similarity_matrix.copy()
# Calculate the distance between each sample
Z = spc.hierarchy.linkage(df.values, 'ward')
plt.figure(figsize=(8,8))
# Make the dendro
spc.hierarchy.dendrogram(Z, labels=df.columns.values, leaf_rotation=0, orientation="left", color_threshold=240, above_threshold_color='grey')
c = spc.hierarchy.fcluster(Z, t=number_of_clusters, criterion='maxclust')
lbl2cluster = {}
region2cluster = {}
for lbl, clus in zip(df.columns.values, c):
lbl2cluster[str(lbl)] = clus
region2cluster[lbl] = clus
# Create a color palette with 3 color for the 3 cyl possibilities
my_palette = plt.cm.get_cmap("viridis", number_of_clusters)
# Apply the right color to each label
ax = plt.gca()
xlbls = ax.get_ymajorticklabels()
for lbl in xlbls:
val=lbl2cluster[lbl.get_text()]
#print(lbl.get_text() + " " + str(val))
lbl.set_color(my_palette(val-1))
plt.show()
plt.close()
self.region_cluster2cluster = region2cluster
def check_scaled(self):
"""Detects not scaled region arrays and norms them using "median" method.
"""
hasToReprocess = False
for regionName in self.regions:
if not regionName in self.region_array_scaled:
hasToReprocess = True
break
if hasToReprocess:
self.logger.info("Calculating internormed regions")
self.get_internormed_regions()
def mass_intensity(self, masses, regions=None, scaled=False, verbose=True):
"""Plots seaborn.boxplot for every selected region depicting the range of intensity values in each cluster.
Args:
masses (float/list/tuple/set): Desired mass(es).
regions (list/numpy.array, optional): Desired regions where to look for mass intensities. Defaults to None meaning to consider all available regions.
scaled (bool, optional): Whether to use intensity values of scaled region arrays. Defaults to False.
verbose (bool, optional): Whether to add information to the logger. Defaults to True.
"""
if not isinstance(masses, (list, tuple, set)):
masses = [masses]
if scaled:
self.check_scaled()
for regionName in self.regions:
if not regions is None and not regionName in regions:
continue
cregion = self.regions[regionName]
cluster2coords = cregion.getCoordsForSegmented()
if not scaled:
dataArray = cregion.region_array
else:
dataArray = self.region_array_scaled[regionName]
for mass in masses:
bestExMassForMass, bestExMassIdx = cregion._get_exmass_for_mass(mass)
if verbose:
self.logger.info("Processing Mass {} with best existing mass {}".format(mass, bestExMassForMass))
clusterIntensities = defaultdict(list)
for clusterid in cluster2coords:
for coord in cluster2coords[clusterid]:
intValue = dataArray[coord[0], coord[1], bestExMassIdx]
clusterIntensities[clusterid].append(intValue)
clusterVec = []
intensityVec = []
massVec = []
specIdxVec = []
for x in clusterIntensities:
elems = clusterIntensities[x]
specIdxVec += [i for i in range(0, len(elems))]
clusterVec += ["Cluster " + str(x)] * len(elems)
intensityVec += elems
massVec += [mass] * len(elems)
dfObj = pd.DataFrame({"mass": massVec, "specidx": specIdxVec, "cluster": clusterVec, "intensity": intensityVec})
sns.boxplot(data=dfObj, x="cluster", y="intensity")
plt.xticks(rotation=90)
plt.title("Intensities for Region {} ({}m/z)".format(regionName, mass))
plt.show()
plt.close()
def mass_heatmap(self, masses, log=False, min_cut_off=None, plot=True, scaled=False, verbose=True, title="{mz}"):
"""Plots heatmap for every selected region depicting region_array spectra reduced to the sum of the specified masses.
Args:
masses (float/list/tuple/set): Desired mass(es).
log (bool, optional): Whether to take logarithm of the output matrix. Defaults to False.
min_cut_off (int/float, optional): Lower limit of values in the output matrix. Smaller values will be replaced with min_cut_off. Defaults to None.
plot (bool, optional): Whether to plot the output matrix. Defaults to True.
scaled (bool, optional): Whether to use intensity values of scaled region arrays. Defaults to False.
verbose (bool, optional): Whether to add information to the logger. Defaults to True.
title (str, optional): Format string defining the plot's title.
Returns:
numpy.array: A matrix of the last region where each element is a sum of intensities at given masses.
"""
if not isinstance(masses, (list, tuple, set)):
masses = [masses]
if scaled:
self.check_scaled()
region2segments = {}
for regionName in self.regions:
cregion = self.regions[regionName]
if scaled == False:
dataArray = self.regions[regionName].region_array
else:
dataArray = self.region_array_scaled[regionName]
image = np.zeros((dataArray.shape[0], dataArray.shape[1]))
for mass in masses:
bestExMassForMass, bestExMassIdx = cregion._get_exmass_for_mass(mass)
if verbose:
self.logger.info("Processing Mass {} with best existing mass {}".format(mass, bestExMassForMass))
for i in range(dataArray.shape[0]):
for j in range(dataArray.shape[1]):
image[i,j] += dataArray[i,j,bestExMassIdx]
if log:
image = np.log(image)
if min_cut_off != None:
image[image <= min_cut_off] = min_cut_off
region2segments[regionName] = image
if plot:
rows = math.ceil(len(self.regions) / 2)
fig, axes = plt.subplots(rows, 2)
if len(axes.shape) > 1:
axes = np.reshape(axes, (1, axes.shape[0] * axes.shape[1]))[0][:]
allMin, allMax = 0,0
for regionName in region2segments:
allMin = min(allMin, np.min(region2segments[regionName]))
allMax = max(allMax, np.max(region2segments[regionName]))
didx = 0
for didx, regionName in enumerate(region2segments):
ax = axes[didx]
heatmap = ax.matshow(region2segments[regionName], vmin=allMin, vmax=allMax)
# We must be sure to specify the ticks matching our target names
ax.set_title(regionName, color="w", y=0.1)
for ddidx in range(didx+1, rows*2):
ax = axes[ddidx]
ax.axis('off')
#fig.colorbar(heatmap, ax=axes[-1])
plt.colorbar(heatmap, ax=axes[:], spacing='proportional')
plt.suptitle(title.format(mz=";".join([str(round(x, 3)) if not type(x) in [str] else x for x in masses])))
plt.show()
plt.close()
return image
def plot_segments(self, highlight=None):
"""Plots segmented arrays of all regions as heatmaps.
Args:
highlight (list/tuple/set/int, optional): If cluster ids are specified here, the resulting clustering will have cluster id 2 for highlight clusters, cluster id 0 for background, and cluster id 1 for the rest. Defaults to None.
"""
assert(not self.region_cluster2cluster is None)
allClusters = [self.region_cluster2cluster[x] for x in self.region_cluster2cluster]
valid_vals = sorted(set(allClusters))
region2segments = {}
for regionName in self.regions:
origSegments = np.array(self.regions[regionName].segmented, copy=True)
region2segments[regionName] = origSegments
if highlight != None:
if not isinstance(highlight, (list, tuple, set)):
highlight = [highlight]
for regionName in region2segments:
showcopy = np.copy(region2segments[regionName])
for i in range(0, showcopy.shape[0]):
for j in range(0, showcopy.shape[1]):
if showcopy[i,j] != 0:
if showcopy[i,j] in highlight:
showcopy[i,j] = 2
elif showcopy[i,j] != 0:
showcopy[i,j] = 1
region2segments[regionName] = showcopy
self._plot_arrays(region2segments)
def plot_common_segments(self, highlight=None):
"""Plots segmented arrays of every region annotating the clusters with respect to new clustering done with CombinedSpectra (saved in region_cluster2cluster).
Args:
highlight (list/tuple/set/int, optional): If cluster ids are specified here, the resulting clustering will have cluster id 2 for highlight clusters, cluster id 0 for background, and cluster id 1 for the rest. Defaults to None.
"""
assert(not self.region_cluster2cluster is None)
allClusters = [self.region_cluster2cluster[x] for x in self.region_cluster2cluster]
valid_vals = sorted(set(allClusters))
region2segments = {}
for regionName in self.regions:
origSegments = np.array(self.regions[regionName].segmented, copy=True)
origCluster2New = {}
for x in self.region_cluster2cluster:
if x[0] == regionName:
origCluster2New[x[1]] = self.region_cluster2cluster[x]
newSegments = np.zeros(origSegments.shape)
print(origCluster2New)
for i in range(0, newSegments.shape[0]):
for j in range(0, newSegments.shape[1]):
newSegments[i,j] = origCluster2New.get(origSegments[i,j], 0)
region2segments[regionName] = newSegments
if highlight != None:
if not isinstance(highlight, (list, tuple, set)):
highlight = [highlight]
for regionName in region2segments:
showcopy = np.copy(region2segments[regionName])
for i in range(0, showcopy.shape[0]):
for j in range(0, showcopy.shape[1]):
if showcopy[i,j] != 0:
if showcopy[i,j] in highlight:
showcopy[i,j] = 2
elif showcopy[i,j] != 0:
showcopy[i,j] = 1
region2segments[regionName] = showcopy
self._plot_arrays(region2segments)
def _plot_arrays(self, region2segments):
"""Plots heatmaps for every region given in region2segments.
Args:
region2segments (dict): A dictionary with region names as keys and respective segmented arrays as values.
"""
rows = math.ceil(len(region2segments) / 2)
fig, axes = plt.subplots(rows, 2)
valid_vals = set()
for regionName in region2segments:
plotarray = region2segments[regionName]
valid_vals = valid_vals.union(list(np.unique(plotarray)))
valid_vals = sorted(valid_vals)
min_ = min(valid_vals)
max_ = max(valid_vals)
positions = np.linspace(min_, max_, len(valid_vals))
val_lookup = dict(zip(positions, valid_vals))
print(val_lookup)
def formatter_func(x, pos):
'The two args are the value and tick position'
val = val_lookup[x]
return val
if len(axes.shape) > 1:
axes = np.reshape(axes, (1, axes.shape[0] * axes.shape[1]))[0][:]
didx=0
for didx, regionName in enumerate(region2segments):
ax = axes[didx]
im = ax.matshow(region2segments[regionName], cmap=plt.cm.get_cmap('viridis', len(valid_vals)), vmin=min_, vmax=max_)
formatter = plt.FuncFormatter(formatter_func)
# We must be sure to specify the ticks matching our target names
ax.set_title(regionName, color="w", y=0.9, x=0.1)
for ddidx in range(didx+1, rows*2):
ax = axes[ddidx]
ax.axis('off')
plt.colorbar(im, ax=axes[:], ticks=positions, format=formatter, spacing='proportional')
plt.show()
plt.close()
def __make_de_res_key(self, region0, clusters0, region1, clusters1):
"""Generates the storage key for two sets of clusters.
Args:
region0 (int): first region id.
clusters0 (list): list of cluster ids 1.
region1 (int): second region id.
clusters1 (list): list of cluster ids 2.
Returns:
tuple: tuple (region0, sorted clusters0, region1, sorted clusters1)
"""
return (region0, tuple(sorted(clusters0)), region1, tuple(sorted(clusters1)))
def to_region_cluster_input(self, region_cluster_list):
rcl0 = defaultdict(list)
for x in region_cluster_list:
rcl0[x[0]].append(x[1])
rcl0 = [(x, tuple(sorted(rcl0[x]))) for x in rcl0]
return rcl0
def find_markers(self, region_cluster_list0, region_cluster_list1, protWeights, mz_dist=3, mz_best=False, use_methods = ["empire", "ttest", "rank"], count_scale={"ttest": 1, "rank": 1}, scaled=True, sample_max=-1):
"""Performs differential analysis to finds marker proteins for specific regions and clusters.
Args:
region_cluster_list0 (list/numpy.array): A list of tuples (region id, list of clusters) that will be used as the 0 conditional vector by differential analysis.
region_cluster_list1 (list/numpy.array): A list of tuples (region id, list of clusters) that will be used as the 1 conditional vector by differential analysis.
protWeights (ProteinWeights): ProteinWeights object for translation of masses to protein names.
mz_dist (float/int, optional): Allowed offset for protein lookup of needed masses. Defaults to 3.
mz_best (bool, optional): Whether to consider only the closest found protein within mz_dist (with the least absolute mass difference). Defaults to False.
use_methods (str/list, optional): Test method(s) for differential expression. Defaults to ["empire", "ttest", "rank"].\n
- "empire": Empirical and Replicate based statistics (EmpiRe).\n
- "ttest": Welch’s t-test for differential expression using diffxpy.api.\n
- "rank": Mann-Whitney rank test (Wilcoxon rank-sum test) for differential expression using diffxpy.api.\n
count_scale (dict, optional): Count scales for different methods (relevant for empire, which can only use integer counts). Defaults to {"ttest": 1, "rank": 1}.
scaled (bool, optional): Whether each processed region is normalized. Those which are not will be scaled with the median method. Defaults to True.
sample_max (int, optional): Allowed number of samples (spectra of specified regions&clusters) will be used by differential analysis (will be randomly picked if there are more available than allowed). Defaults to -1 meaning all samples are used.
Returns:
tuple: Tuple (collections.defaultdict, pandas.core.frame.DataFrame, pandas.core.frame.DataFrame). Dictionary with test method mapped to each tuple (region, clusters) and respective results. Two further data frames with expression data and test design.
"""
if type(region_cluster_list0) in (list, tuple):
region_cluster_list0 = self.to_region_cluster_input(region_cluster_list0)
if type(region_cluster_list1) in (list, tuple):
region_cluster_list1 = self.to_region_cluster_input(region_cluster_list1)
for pair in region_cluster_list0:
assert(pair[0] in self.regions)
assert([x for x in self.regions[region_cluster_list0[0][0]].idx2mass] == [x for x in self.regions[pair[0]].idx2mass])
for pair in region_cluster_list1:
assert(pair[0] in self.regions)
assert([x for x in self.regions[region_cluster_list1[0][0]].idx2mass] == [x for x in self.regions[pair[0]].idx2mass])
cluster2coords0 = {}
for pair in region_cluster_list0:
cluster2coords0[pair[0]] = self.regions[pair[0]].getCoordsForSegmented()
assert(all([x in cluster2coords0[pair[0]] for x in pair[1]]))
cluster2coords1 = {}
for pair in region_cluster_list1:
cluster2coords1[pair[0]] = self.regions[pair[0]].getCoordsForSegmented()
assert(all([x in cluster2coords1[pair[0]] for x in pair[1]]))
resKey = self.__make_de_res_key(region_cluster_list0[0][0], region_cluster_list0[0][1], region_cluster_list1[0][0], region_cluster_list1[0][1])
if scaled:
self.check_scaled()
if self.de_results_all is None:
self.de_results_all = defaultdict(lambda: dict())
sampleVec = []
conditionVec = []
exprData = pd.DataFrame()
for pair in region_cluster_list0:
region0 = pair[0]
clusters0 = pair[1]
masses = [("mass_" + str(x)).replace(".", "_") for x in self.regions[region0].idx2mass]
for clus in clusters0:
allPixels = cluster2coords0[region0][clus]
self.logger.info("Processing region {} cluster: {}".format(region0, clus))
bar = progressbar.ProgressBar()
if scaled:
dataArray = self.region_array_scaled[region0]
else:
dataArray = self.regions[region0].region_array
if sample_max > 0 and len(allPixels) > sample_max:
allPixels = random.sample(allPixels, sample_max)
for pxl in bar(allPixels):
pxl_name = "{}__{}__{}".format(region0, str(len(sampleVec)), "_".join([str(x) for x in pxl]))
sampleVec.append(pxl_name)
conditionVec.append(0)
exprData[pxl_name] = dataArray[pxl[0], pxl[1], :]#.astype('int')
for pair in region_cluster_list1:
region1 = pair[0]
clusters1 = pair[1]
for clus in clusters1:
self.logger.info("Processing region {} cluster: {}".format(region1, clus))
allPixels = cluster2coords1[region1][clus]
bar = progressbar.ProgressBar()
if scaled:
dataArray = self.region_array_scaled[region1]
else:
dataArray = self.regions[region1].region_array
if sample_max > 0 and len(allPixels) > sample_max:
allPixels = random.sample(allPixels, sample_max)
for pxl in bar(allPixels):
pxl_name = "{}__{}__{}".format(region1, str(len(sampleVec)), "_".join([str(x) for x in pxl]))
sampleVec.append(pxl_name)
conditionVec.append(1)
exprData[pxl_name] = dataArray[pxl[0], pxl[1], :]#.astype('int')
self.logger.info("DE DataFrame ready. Shape {}".format(exprData.shape))
pData = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Covid-19 Daily Deaths - UK
#
# Via: https://www.england.nhs.uk/statistics/statistical-work-areas/covid-19-daily-deaths/
#
# At the moment, each time this script runs it downloads all the daily datafiles and builds the db from scratch. We need to optimise things so that only new daily files are parsed and added, incrementally, to the database.
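#
# One possible incremental scheme (a sketch only — the `url` column on the `processed` table is an assumption, nothing in this notebook creates it yet): record each daily file's URL in `processed` once it has been parsed, and skip any URL that is already recorded before downloading it again.
#
# def needs_processing(db, url):
#     """Return True if this daily file has not been parsed into the db yet."""
#     try:
#         seen = {row["url"] for row in db["processed"].rows}
#     except Exception:  # the table may not exist on the first run
#         seen = set()
#     return url not in seen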
# +
import sqlite_utils
# #!rm nhs_dailies.db
DB = sqlite_utils.Database("nhs_dailies.db")
processed = DB['processed']
# Start on a mechanism for only downloading things we haven't already grabbed
# Need a better way to handle querying the table if it doesn't exist yet
try:
already_processed = | pd.read_sql("SELECT * FROM processed", DB.conn) | pandas.read_sql |
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
rand_dataset = | pd.read_csv("sample_submission.csv") | pandas.read_csv |
import os
import numpy as np
import pandas as pd
import tsplib95
import networkx as nx
from tqdm import tqdm
import sys
import re
def prepare_testset_FINDER(data_dir, scale_factor=0.000001):
graph_list = []
atoi = lambda text : int(text) if text.isdigit() else text
natural_keys = lambda text : [atoi(c) for c in re.split('(\d+)', text)]
fnames = os.listdir(data_dir)
fnames.sort(key=natural_keys)
print("Loading test graphs...")
for fname in tqdm(fnames):
try:
if not '.tsp' in fname or '.sol' in fname:
continue
problem = tsplib95.load(data_dir + fname)
g = problem.get_graph()
except:
print('Error, while loading file {}'.format(fname))
# remove edges from one node to itself
ebunch=[(k,k) for k in g.nodes]
g.remove_edges_from(ebunch)
# reset node index to start at zero
mapping = {k:i for i,k in enumerate(g.nodes)}
g = nx.relabel_nodes(g, mapping)
# scale size of the graphs such that it fits into 0,1 square
for node in g.nodes:
g.nodes[node]['coord'] = np.array(g.nodes[node]['coord']) * scale_factor
for edge in g.edges:
g.edges[edge]['weight'] = g.edges[edge]['weight'] * scale_factor
graph_list.append(g)
print("Number of loaded test graphs:",len(graph_list))
return graph_list, fnames
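# Example usage (sketch; the folder path is only an illustration — any directory of
# TSPLIB .tsp files with node coordinates will do):
#   graphs, names = prepare_testset_FINDER('data/test_sets/synthetic_n200/')
#   print(len(graphs), graphs[0].number_of_nodes())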
def prepare_testset_S2VDQN(folder, scale_factor=0.000001):
if folder[-1] == '/':
folder = folder[0:-1]
graph_list = []
fnames = []
print("Loading test graphs...")
with open(f'{folder}/paths.txt', 'r') as f:
for line in tqdm(f):
fname = line.split('/')[-1].strip()
file_path = '%s/%s' % (folder, fname)
try:
if not '.tsp' in fname or '.sol' in fname:
continue
problem = tsplib95.load(file_path)
g = problem.get_graph()
except:
print('Error, while loading file {}'.format(fname))
# remove edges from one node to itself
ebunch=[(k,k) for k in g.nodes]
g.remove_edges_from(ebunch)
# reset node index to start at zero
mapping = {k:i for i,k in enumerate(g.nodes)}
g = nx.relabel_nodes(g, mapping)
# scale size of the graphs such that it fits into 0,1 square
for node in g.nodes:
g.nodes[node]['coord'] = np.array(g.nodes[node]['coord']) * scale_factor
for edge in g.edges:
g.edges[edge]['weight'] = g.edges[edge]['weight'] * scale_factor
graph_list.append(g)
fnames.append(fname)
# print("Number of loaded test graphs:",len(graph_list))
return graph_list, fnames
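# The approximation ratio below is (found tour length / optimal tour length), with the
# optima read from the test set's lengths.txt; get_approx_ratios returns the
# per-instance ratios together with their mean.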
def get_approx_ratios(data_dir, test_lengths):
fnames = get_fnames(data_dir)
true_lengths = []
len_dict = get_len_dict(data_dir)
for fname in fnames:
true_lengths.append(len_dict[fname])
approx_ratios = [length[0]/length[1] for length in zip(test_lengths, true_lengths)]
mean_approx_ratio = np.mean([length[0]/length[1] for length in zip(test_lengths, true_lengths)])
return approx_ratios, mean_approx_ratio
def get_fnames(dir, search_phrase='tsp'):
atoi = lambda text : int(text) if text.isdigit() else text
natural_keys = lambda text : [atoi(c) for c in re.split('(\d+)', text)]
try:
fnames = [f for f in os.listdir(dir) if os.path.isfile(f'{dir}/{f}')]
fnames.sort(key=natural_keys)
except:
print('\nBad directory!')
fnames = [fname for fname in fnames if search_phrase in fname]
return fnames
def get_len_dict(folder):
# get lengths
with open(f'{folder}/lengths.txt', 'r') as f:
lines = f.readlines()
file_names = [line.split(':')[0].strip() for k, line in enumerate(lines)]
test_lens = [float(line.split(':')[-1].strip()) for k, line in enumerate(lines)]
len_dict = dict(zip(file_names, test_lens))
return len_dict
def save_solutions(data_dir, solutions, model_name, suffix=''):
fnames = get_fnames(data_dir)
sol_df = pd.DataFrame()
idx = 0
tqdm.write("Saving solutions...")
for fname in tqdm(fnames):
if not '.tsp' in fname or '.sol' in fname:
continue
tmp_df = pd.DataFrame()
tmp_df[fname] = solutions[idx]
sol_df = pd.concat([sol_df,tmp_df.astype(int)], ignore_index=False, axis=1)
idx += 1
test_set_folder = data_dir.split("/")[-2]
test_set_name = data_dir.split("/")[-1]
result_path = f'results/{model_name}/{test_set_folder}/{test_set_name}'
model_name_short = '_'.join(model_name.split('_')[0:-4])
create_dir(result_path)
if suffix:
sol_df.to_csv(f'{result_path}/solutions_{model_name_short}_{suffix}.csv')
else:
sol_df.to_csv(f'{result_path}/solutions_{model_name_short}.csv')
def save_lengths(data_dir, lengths, model_name, suffix=''):
fnames = get_fnames(data_dir)
lens_df = pd.DataFrame()
idx = 0
tqdm.write("Saving solution lengths...")
for fname in tqdm(fnames):
if not '.tsp' in fname or '.sol' in fname:
continue
tmp_df = pd.DataFrame()
tmp_df[fname] = [lengths[idx]]
lens_df = pd.concat([lens_df,tmp_df], ignore_index=False, axis=1)
idx += 1
test_set_folder = data_dir.split("/")[-2]
test_set_name = data_dir.split("/")[-1]
result_path = f'results/{model_name}/{test_set_folder}/{test_set_name}'
model_name_short = '_'.join(model_name.split('_')[0:-4])
create_dir(result_path)
if suffix:
lens_df.to_csv(f'{result_path}/tour_lengths_{model_name_short}_{suffix}.csv')
else:
lens_df.to_csv(f'{result_path}/tour_lengths_{model_name_short}.csv')
def save_approx_ratios(data_dir, approx_ratios, model_name, suffix=''):
fnames = get_fnames(data_dir)
approx_df = pd.DataFrame()
idx = 0
tqdm.write("Saving approximation ratios...")
for fname in tqdm(fnames):
if not '.tsp' in fname or '.sol' in fname:
continue
tmp_df = pd.DataFrame()
tmp_df[fname] = [approx_ratios[idx]]
approx_df = pd.concat([approx_df,tmp_df], ignore_index=False, axis=1)
idx += 1
test_set_folder = data_dir.split("/")[-2]
test_set_name = data_dir.split("/")[-1]
result_path = f'results/{model_name}/{test_set_folder}/{test_set_name}'
model_name_short = '_'.join(model_name.split('_')[0:-4])
create_dir(result_path)
if suffix:
approx_df.to_csv(f'{result_path}/approx_ratios_{model_name_short}_{suffix}.csv')
else:
approx_df.to_csv(f'{result_path}/approx_ratios_{model_name_short}.csv')
def create_dir(save_dir):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
def get_test_approx_ratios_for_model(test_set_names, model_name, search_strategy='greedy'):
mean_approx_ratios = []
std_approx_ratios = []
for test_set in test_set_names:
result_dir = f'../results/{model_name}/test_sets/{test_set}'
try:
fnames, approx_ratios, test_lengths, solutions = get_data_from_result_files(result_dir, search_strategy=search_strategy)
except:
# print(search_strategy)
# print('Using placeholders!')
approx_ratios = [np.nan]
mean_approx_ratios.append(np.mean(approx_ratios))
std_approx_ratios.append(np.std(approx_ratios))
return mean_approx_ratios, std_approx_ratios
def get_data_from_result_files(result_dir, search_strategy='greedy'):
# print(result_dir)
for f in os.listdir(result_dir):
if not search_strategy in f:
continue
if 'solution' in f:
sol_df = | pd.read_csv(f'{result_dir}/{f}', index_col=0) | pandas.read_csv |
import re
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sa
import statsmodels.formula.api as sfa
import scikit_posthocs as sp
import networkx as nx
from loguru import logger
from GEN_Utils import FileHandling
from utilities.database_collection import network_interactions, all_interactions, interaction_enrichment
logger.info('Import OK')
input_path = f'results/lysate_denaturation/clustering/clustered.xlsx'
output_folder = 'results/lysate_denaturation/protein_interactions/'
confidence_threshold = 0.7
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# ------------------------------Read in clustered data------------------------------
# Read in standard components - hits & background
proteins = pd.read_excel(f'{input_path}', sheet_name='summary')
proteins = proteins.drop([col for col in proteins.columns.tolist() if 'Unnamed: ' in col], axis=1)[['Proteins', 'mixed', 'unique', 'count']]
proteins = pd.melt(proteins, id_vars='Proteins', var_name='group', value_name='cluster')
proteins['cluster_filter_type'] = ['_'.join([var, str(val)]) for var, val in proteins[['group', 'cluster']].values]
cluster_summary = proteins.groupby('cluster_filter_type').count()['Proteins'].reset_index()
# Test 1: Get intra-cluster interactions (i.e. interactions within a cluster)
intra_cluster_interactions = {}
for cluster_type, df in proteins.groupby('cluster_filter_type'):
gene_ids = df['Proteins'].unique()
intra_cluster_interactions[cluster_type] = network_interactions(gene_ids, tax_id=10090, id_type='uniprot')
# calculate number of interactions for which evidence is > 0.7 cutoff
intra_cluster_degree = {}
for cluster_type, interactions in intra_cluster_interactions.items():
filtered_ints = interactions[interactions['score'].astype(float) > confidence_threshold]
intra_cluster_degree[cluster_type] = len(filtered_ints)
cluster_summary['number_within_cluster'] = cluster_summary['cluster_filter_type'].map(intra_cluster_degree)
cluster_summary['normalised_within_cluster'] = cluster_summary['number_within_cluster'] / cluster_summary['Proteins']
# Test 2: Get intra-cluster interactions within whole interaction dataset vs inter-cluster interactions
gene_ids = proteins['Proteins'].unique()
interactions = network_interactions(gene_ids, tax_id=10090, id_type='uniprot')
interactions = interactions[interactions['score'].astype(float) > confidence_threshold] # less than half remain!
# calculate number of interactions for which evidence is > 0.7 cutoff
inter_vs_intra = {}
for cluster_type, df in proteins.groupby('cluster_filter_type'):
gene_ids = df['Proteins'].unique()
cluster_ints = interactions.copy()
cluster_ints['int_A'] = [1 if protein in gene_ids else 0 for protein in cluster_ints['originalId_A']]
cluster_ints['int_B'] = [1 if protein in gene_ids else 0 for protein in cluster_ints['originalId_B']]
cluster_ints['int_type'] = cluster_ints['int_A'] + cluster_ints['int_B']
inter_vs_intra[cluster_type] = cluster_ints['int_type'].value_counts()
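# int_type counts how many of the two interaction partners belong to the cluster:
# 0 = neither, 1 = exactly one (an edge crossing the cluster boundary), 2 = both inside.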
inter_vs_intra = pd.DataFrame(inter_vs_intra).T.reset_index()
inter_vs_intra.columns = ['cluster_filter_type', 'not_in_cluster', 'outside_cluster', 'inside_cluster']
cluster_summary = | pd.merge(cluster_summary, inter_vs_intra, on='cluster_filter_type') | pandas.merge |
import re
import json
from itertools import filterfalse
from luigi import Task
from luigi.task import logger as luigi_logger
import pandas as pd
from lxml import objectify
from datapackage import Package
from ..utils import TargetOutput
from ..utils import SaltedOutput
from ..utils import SuffixPreservingLocalTarget as LocalTarget
from ..utils import convert_numeric_code_with_pad
from ..utils import convert_numeric_code
from ..utils import Requires
from ..utils import Requirement
from .data import SaltedFileSource
from .data import SaltedSTSSource
from .data import SaltedM49Source
from .data import SaltedEdgarSource
from .data import SOURCES
from .data import DEV_MODE
class UNCodes(Task):
__version__ = '0.1'
DATA_ROOT = 'build/'
pattern = '{task.__class__.__name__}-{salt}'
output = SaltedOutput(file_pattern=pattern, ext='.csv',
base_dir=DATA_ROOT,
target_class=LocalTarget)
requires = Requires()
unterm = Requirement(SaltedFileSource, slug='unterm', ext='.xlsx')
m49 = Requirement(SaltedM49Source, slug='M49', ext='.csv')
def run(self):
# Namibia's 2 letter codes are often `NA`, so setting
# `keep_default_na=False` and clearing `na_values` is essential!
m49 = pd.read_csv(self.requires().get('m49').output().path,
keep_default_na=False, na_values=['_'],
converters={'Country or Area_en':
lambda x: x.replace("Côte d’Ivoire", "Côte d'Ivoire"),
'M49 Code': convert_numeric_code_with_pad,
'Global Code': convert_numeric_code,
'Region Code': convert_numeric_code,
'Sub-region Code': convert_numeric_code,
'Intermediate Region Code': convert_numeric_code})
m49 = m49.add_suffix(' (M49)')
# UN Protocol liason office needs to update their names!
unterm_replacements = {
"Czech Republic": "Czechia",
"Swaziland": "Eswatini",
"the former Yugoslav Republic of Macedonia": "North Macedonia"}
def fix_english_short(text):
better = text.replace('(the)', '').replace('*', '').strip()
for old, new in unterm_replacements.items():
better = better.replace(old, new)
return better
unterm_converters = {'English Short': fix_english_short}
unterm = pd.read_excel(self.requires().get('unterm').output().path,
converters=unterm_converters)
unterm = unterm.add_suffix(' (unterm)')
include_indicator = bool(DEV_MODE)
un = pd.merge(m49, unterm, how='outer', indicator=include_indicator,
left_on='Country or Area_en (M49)',
right_on='English Short (unterm)')
with self.output().open('w') as f:
un.to_csv(f, index=False, float_format='%.0f')
class iso4217(Task):
__version__ = '0.1'
DATA_ROOT = 'build/'
pattern = '{task.__class__.__name__}-{salt}'
output = SaltedOutput(file_pattern=pattern, ext='.csv',
base_dir=DATA_ROOT,
target_class=LocalTarget)
requires = Requires()
iso4217 = Requirement(SaltedFileSource, slug='iso4217', ext='.xml')
def run(self):
as_xml = objectify.parse(self.requires().get('iso4217').output().path)
# currencies to skip
skip = ['EUROPEAN UNION', 'MEMBER COUNTRIES OF THE AFRICAN DEVELOPMENT BANK GROUP',\
'SISTEMA UNITARIO DE COMPENSACION REGIONAL DE PAGOS "SUCRE"']
# some country names to fix
currency_country_name_map = {
"CABO VERDE": "CAPE VERDE",
"CONGO (THE)": "CONGO",
"CONGO (THE DEMOCRATIC REPUBLIC OF THE)": "DEMOCRATIC REPUBLIC OF THE CONGO",
"HEARD ISLAND AND McDONALD ISLANDS": "HEARD ISLAND AND MCDONALD ISLANDS",
"HONG KONG": 'CHINA, HONG KONG SPECIAL ADMINISTRATIVE REGION',
"KOREA (THE DEMOCRATIC PEOPLE’S REPUBLIC OF)": "DEMOCRATIC PEOPLE'S REPUBLIC OF KOREA",
"KOREA (THE REPUBLIC OF)": "REPUBLIC OF KOREA",
"MACAO": "China, Macao Special Administrative Region",
"MACEDONIA (THE FORMER YUGOSLAV REPUBLIC OF)": "NORTH MACEDONIA",
"MOLDOVA (THE REPUBLIC OF)": "REPUBLIC OF MOLDOVA",
"PALESTINE, STATE OF": "State of Palestine",
"SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA": "Saint Helena",
"SVALBARD AND JAN MAYEN": "Svalbard and Jan Mayen Islands",
"TANZANIA, UNITED REPUBLIC OF": "United Republic of Tanzania",
"VIRGIN ISLANDS (U.S.)": "UNITED STATES VIRGIN ISLANDS",
"VIRGIN ISLANDS (BRITISH)": "BRITISH VIRGIN ISLANDS",
"WALLIS AND FUTUNA": "Wallis and Futuna Islands",
}
as_lists = []
seen = []
for iso_currency_table in as_xml.iter():
if isinstance(iso_currency_table, objectify.ObjectifiedElement):
# get strings rather than an ObjectifiedElements
# because they can be truthy while their .text is None!
currency = [c.text for c in iso_currency_table.getchildren()]
if currency:
if currency[0]:
if currency[0] in skip or currency[0].startswith('ZZ') or currency[0].startswith('INTERNATIONAL'):
continue
if currency[0] in currency_country_name_map:
# correct the few names that do not match M49 names
currency[0] = currency_country_name_map.get(currency[0])
if currency[0] not in seen:
# source includes additional, commonly used or
# accepted currencies from other states.
# we keep only the first/most official currency
seen.append(currency[0])
as_lists.append(currency)
columns = ['Country Name (iso4217)', 'Currency Name (iso4217)',\
'Currency Code Alpha (iso4217)', 'Currency Code Numeric (iso4217)',\
'Currency Minor Units (iso4217)']
df = pd.DataFrame(data=as_lists, columns=columns)
with self.output().open('w') as f:
df.to_csv(f, index=False, float_format='%.0f')
class marc(Task):
__version__ = '0.1'
DATA_ROOT = 'build/'
pattern = '{task.__class__.__name__}-{salt}'
output = SaltedOutput(file_pattern=pattern, ext='.csv',
base_dir=DATA_ROOT,
target_class=LocalTarget)
requires = Requires()
marc = Requirement(SaltedFileSource, slug='marc', ext='.xml')
def run(self):
as_xml = objectify.parse(self.requires().get('marc').output().path)
as_lists = []
for territory in as_xml.getroot().countries.iterchildren():
if isinstance(territory, objectify.ObjectifiedElement):
stuff = [c.text for c in territory.getchildren()
if c.text and not c.text.startswith('info')]
if stuff:
if stuff[2] in ['North America'] and stuff[0] not in ['Greenland', 'Canada', 'United States', 'Mexico']:
# skip US States and Canadian Provinces
continue
if stuff[2] in ['Australasia'] and stuff[0] not in ['Australia', 'New Zealand', 'Tazmania']:
# skip Australian States and Territories
continue
if len(stuff) != 3:
if stuff[0] in ['Indonesia', 'Yemen']:
# names are repeated twice for these
stuff.pop(0)
if stuff[0] in ['Anguilla']:
# discard extra obsolete code
stuff.pop()
as_lists.append(stuff)
columns = ['Country Name (marc)', 'Marc Code (marc)',
'Continent (marc)']
df = | pd.DataFrame(data=as_lists, columns=columns) | pandas.DataFrame |
# coding: utf-8
# In[ ]:
import re
import sys
import os
import argparse
import pandas as pd
from pprint import pprint
from collections import OrderedDict
from operator import itemgetter
import json
def main():
# Parse args
args = parse_args()
# read manifest + EFO mappings
FINNGEN_EFO=pd.read_csv(args.in_EFO, sep='\t',
header=0)
FINNGEN_manifest = ( | pd.read_json(args.in_manifest, lines=True) | pandas.read_json |
"""
"""
import pandas as pd
import os, sys
import numpy as np
# tie-breaker for RQ1
TIE = 'avg' #'MAX'
def read_and_add_flag(filename):
"""
"""
df = pd.read_csv(filename)
indices = df.index.values
df['flag'] = df.true == df.pred
return df
def compute_acc(init_pred_df, aft_pred_df, classes):
"""
"""
from sklearn.metrics import accuracy_score
# per class
acc_per_class = {}
for class_label in classes:
to_target = init_pred_df.true.values == class_label
to_target_aft = aft_pred_df.true.values == class_label
assert all(to_target == to_target_aft), class_label
labels = init_pred_df.loc[to_target].true.values
init_pred_for_class = init_pred_df.loc[to_target].pred.values
aft_pred_for_class = aft_pred_df.loc[to_target].pred.values
        init_acc_for_class = accuracy_score(labels, init_pred_for_class)
        aft_acc_for_class = accuracy_score(labels, aft_pred_for_class)
acc_per_class[class_label] = {'init':init_acc_for_class, 'aft':aft_acc_for_class}
return acc_per_class
def combine_init_aft_predcs(init_pred_df, aft_pred_df):
"""
"""
# combine
combined_df = | pd.DataFrame(data = {
'true':init_pred_df.true.values,
'pred':init_pred_df.pred.values,
'new_pred':aft_pred_df.pred.values,
'init_flag':init_pred_df.flag}) | pandas.DataFrame |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
#from sqlalchemy import create_engine
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[1]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
# n, pint, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
y = spq['Product_Qty']
num_bins = 5
# n, dint, patches = plt.hist(y, num_bins, facecolor='blue', alpha=0.5)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
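        # OLS demand model: quantity explained by the price gap to the competitor,
        # the two promotion flags and a log-transformed week index acting as a trend term.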
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
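        # Revenue maximisation: SLSQP minimises the negative of sum(p_t * demand(p_t)),
        # keeping total demand within the starting inventory s_0 and every price
        # within the user-supplied [0, Price_Upper] bounds.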
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = | pd.DataFrame(result) | pandas.DataFrame |
import pandas as pd
import itertools
class Aprioripy:
def __init__(self,
table,
convert=True,
items=[],
excluded_items=[],
positive_label=1):
def converter(table):
item_list = list(set(sum([x.split(", ") for x in table["items"].tolist()], [])))
item_list.sort()
new_table = []
for row in table["items"].tolist():
table = {}
for item in item_list:
if item in row:
table[item] = 1
else:
table[item] = 0
new_table.append(table)
table = | pd.DataFrame(new_table) | pandas.DataFrame |
from cmath import nan
from tokenize import endpats
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import chart_studio.plotly as py
import plotly.graph_objects as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.express as px
init_notebook_mode(connected=True)
import cufflinks as cf
cf.go_offline()
import numpy as np
import pandas as pd
from geomstats.information_geometry.categorical import CategoricalDistributions, CategoricalMetric
class CategoricalDistributionsManifold:
r""" Class for visualizing the manifold of categorical distributions.
This is the set of $n+1$-tuples of positive reals that sum up to one,
i.e. the $n$-simplex. Each point is the parameter of a categorical
distribution, i.e. gives the probabilities of $n$ different outcomes
in a single experiment.
Attributes:
-----------
dim : integer
Dimension of the manifold.
points: array-like, [[..., dim + 1], [..., dim + 1], ... ]
Discrete points to be plotted on the manifold.
Notes:
------
The class only implements visualization methods for 2D and 3D manifolds.
"""
def __init__(self, dim):
""" Construct a CategoricalDistributionsManifold object.
Construct a CategoricalDistributionsManifold with a given dimension.
Parameters:
-----------
dim : integer
Dimension of the manifold
Returns:
--------
None.
Notes:
------
dim should be a positive integer.
The methods only support visualization of 2-D and 3-D manifolds.
"""
self.dim = dim
self.points = []
self.ax = None
self.elev, self.azim = None, None
self.metric = CategoricalMetric(dim = self.dim)
self.dist = CategoricalDistributions(dim = self.dim)
def plot(self):
""" Plot the 2D or 3D Manifold.
Plot the 2D Manifold as a regular 2-simplex(triangle) or
        the 3D Manifold as a regular 3-simplex (tetrahedron).
Parameters
----------
None.
Returns
-------
None.
Notes
-----
This method only works properly if the dimension is 2 or 3.
References
----------
Simplex: https://en.wikipedia.org/wiki/Simplex
"""
min_limit = 0
max_limit = 1
plt.figure(dpi = 100)
self.set_axis(min_limit, max_limit)
if self.dim == 3:
self.set_view()
x = [0, 1, 0, 0]
y = [0, 0, 1, 0]
z = [0, 0, 0, 1]
vertices = [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
tupleList = list(zip(x, y, z))
poly3d = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))] for ix in range(len(vertices))]
self.ax.add_collection3d(Poly3DCollection(poly3d, edgecolors='k', facecolors=(0.9, 0.9, 0.9, 1.0), linewidths=3, alpha=0.2))
elif self.dim == 2:
X = np.linspace(start = min_limit, stop = max_limit, num = 101, endpoint = True)
Y = 1 - X
self.ax.fill_between(X, Y, color = (0.9, 0.9, 0.9, 1.0))
self.ax.set_title("2 Dimension Categorical Manifold")
def plot3D(self):
""" Plot the 3D Manifold using Plotly.
        Plot the 3D Manifold as a regular 3-simplex (tetrahedron).
Parameters
----------
None.
Returns
-------
None.
Notes
-----
This method only works properly if the dimension is 3.
"""
if self.dim != 3:
print('Invalid Dimension')
return
fig = go.Figure(data=[go.Mesh3d(
x = [0, 1, 0, 0],
y = [0, 0, 1, 0],
z = [0, 0, 0, 1],
colorbar_title='z',
colorscale=[[0, 'gold'],
[0.5, 'mediumturquoise'],
[1, 'magenta']],
# Intensity of each vertex, which will be interpolated and color-coded
intensity=[0, 0.33, 0.66, 1],
# i, j and k give the vertices of triangles
# here we represent the 4 triangles of the tetrahedron surface
i=[0, 0, 0, 1],
j=[1, 2, 3, 2],
k=[2, 3, 1, 3],
name='y',
showscale=False,
opacity=0.25
)
]).update(layout=dict(title=dict(x=0.5)))
fig.update_layout(title_text='3 Dimension Categorical Manifold')
fig.show()
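    # Example usage (sketch):
    #   manifold = CategoricalDistributionsManifold(dim=2)
    #   manifold.plot()        # draws the shaded 2-simplex
    #   manifold.scatter(500)  # overlays 500 randomly sampled distributions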
def set_points(self, points):
self.points = points
def scatter(self, n_samples, **scatter_kwargs):
""" Scatter plot some randomly sampled points in the manifold.
Plot the manifold along with some randomly sampled points
lying on the manifold.
Parameters:
-----------
n_samples : integer
The number of randomly sampled points.
**scatter_kwargs: optional
Inherits the matplotlib scatter function parameters.
Returns:
--------
None.
Notes:
------
This method internally calls the plot method.
"""
self.set_points(self.dist.random_point(n_samples=n_samples))
if self.dim == 3:
# Plot 3D Mesh with Sample Scatter Points in Plotly
df = pd.DataFrame(self.points, columns = ['x1', 'x2','x3','x4'])
scatter = px.scatter_3d(df,x = 'x1',
y = 'x2',
z = 'x3',
color = 'x4',
title = f'3D Scatterplot for {n_samples} Samples',
opacity = 0.5)
scatter.update_traces(marker_size = 3)
mesh = go.Figure(data=[
go.Mesh3d(
x = [0, 1, 0, 0],
y = [0, 0, 1, 0],
z = [0, 0, 0, 1],
colorbar_title='z',
colorscale=[[0, 'gold'],
[0.5, 'mediumturquoise'],
[1, 'magenta']],
# Intensity of each vertex, which will be interpolated and color-coded
intensity=[0, 0.33, 0.66, 1],
# i, j and k give the vertices of triangles
# here we represent the 4 triangles of the tetrahedron surface
i=[0, 0, 0, 1],
j=[1, 2, 3, 2],
k=[2, 3, 1, 3],
name='y',
showscale=False,
opacity=0.25,
hoverinfo='skip',
hovertemplate=None
)
])
mesh.update_traces(
hovertemplate=None,
hoverinfo='skip'
)
fig = go.Figure(data=scatter.data+mesh.data)
fig.update_layout(title_text=f'3D Scatterplot for {n_samples} Samples')
fig.show()
elif self.dim == 2:
self.plot()
for point in self.points:
self.ax.scatter(point[0], point[1], **scatter_kwargs)
self.ax.set_title(f'2 Dimension Categorical Manifold with {n_samples} Samples')
self.clear_points()
def plot_geodesic(self, initial_point, end_point = None, tangent_vector = None):
""" Plot a geodesic on the manifold.
Plot a geodesic that is either specified with
1) an initial_point and an end_point, or
2) an initial point and an initial tangent vector
on the manifold.
Parameters:
-----------
initial_point: array-like, shape = [..., dim + 1]
Initial point on the manifold.
end_point: optional, array-like, shape = [..., dim + 1]
End point on the manifold.
tangent_vector: optional, array-like, shape = [..., dim + 1]
Initial tangent vector at the initial point.
Returns:
--------
None.
Notes:
------
Either end_point or tangent_vector needs to be specified.
The initial point will be marked red.
The initial tangent vector will also be plotted starting from the initial point.
"""
self.plot()
geodesic = self.metric.geodesic(initial_point=initial_point, end_point = end_point, initial_tangent_vec = tangent_vector)
num_samples = 200
if self.dim == 3:
for i in range(num_samples):
point = geodesic(i/num_samples)
self.ax.scatter(point[0], point[1], point[2], color='blue', s = 2)
self.ax.scatter(geodesic(0)[0], geodesic(0)[1], geodesic(0)[2], color='red', s = 30)
if tangent_vector is not None:
normalized_tangent_vector = tangent_vector/np.sum(np.power(tangent_vector, 2))
self.ax.quiver(
initial_point[0],
initial_point[1],
initial_point[2],
normalized_tangent_vector[0],
normalized_tangent_vector[1],
normalized_tangent_vector[2],
color = 'red',
length = 0.1,
normalize = True
)
geodesic = self.metric.geodesic(initial_point=initial_point, end_point = end_point, initial_tangent_vec = tangent_vector)
num_samples = 100
geodesic_points = np.zeros(shape=(num_samples,4))
if self.dim == 3:
for i in range(num_samples):
point = geodesic(i/num_samples)
geodesic_points[i] = point
df = | pd.DataFrame(geodesic_points, columns = ['x1', 'x2','x3','x4']) | pandas.DataFrame |
import h5py
from pathlib import Path
from typing import Union, Tuple
import pickle
import json
import os
import gc
from tqdm import tqdm
import numpy as np
import pandas as pd
# TODO output check, verbose
def load_all_libsdata(path_to_folder: Union[str, Path]) -> Tuple[pd.DataFrame, list, pd.Series]:
"""
Function for loading .libsdata and corresponding .libsmetadata files. Scans
the entire folder for any such files.
Args:
path_to_folder (str or Path) : path to the folder to be scanned.
Returns:
pd.DataFrame : combined .libsdata files
list : list of .libsmetadata files
pd.Series : list of file labels for each entry. Can be used to connect each
entry to the file it originated from.
"""
data, metadata, samples = [], [], []
if isinstance(path_to_folder, str):
path_to_folder = Path(path_to_folder)
for f in tqdm(path_to_folder.glob('**/*.libsdata')):
try:
meta = json.load(open(f.with_suffix('.libsmetadata'), 'r'))
except:
print('[WARNING] Failed to load metadata for file {}! Skipping!!!'.format(f))
continue
df = np.fromfile(open(f, 'rb'), dtype=np.float32)
df = np.reshape(df, (meta['spectra'] + 1, meta['wavelengths']))
df = pd.DataFrame(df[1:], columns=df[0])
data.append(df)
metadata.append(meta)
samples += [f.stem.split('_')[0] for _ in range(len(df))]
data = pd.concat(data, ignore_index=True)
samples = pd.Series(samples)
return data, metadata, samples
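# Example usage (sketch; the folder path is only an illustration — it just needs to
# contain paired .libsdata/.libsmetadata files):
#   data, metadata, samples = load_all_libsdata('measurements/')
#   print(data.shape, samples.nunique())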
def load_libsdata(path_to_file: Union[str, Path]) -> Tuple[pd.DataFrame, dict]:
"""
Function for loading a .libsdata and the corresponding .libsmetadata file.
Args:
path_to_file (str or Path) : path to the .libsdata or .libsmetadata file
to be loaded. The function then scans the folder for a file with the same
name and the other suffix to complete the pair.
Returns:
pd.DataFrame : loaded data file
dict : metadata
"""
data, metadata = None, None
if isinstance(path_to_file, str):
path_to_file = Path(path_to_file)
for f in path_to_file.parents[0].iterdir():
if path_to_file.stem in f.stem:
if f.suffix == '.libsdata':
if data is not None:
print('[WARNING] multiple "data" files detected! Using first found!!!')
else:
data = np.fromfile(open(f, 'rb'), dtype=np.float32)
elif f.suffix == '.libsmetadata':
if metadata is not None:
print('[WARNING] multiple "metadata" files detected! Using first found!!!')
else:
metadata = json.load(open(f))
else:
print('[WARNING] unrecognized extension for file {}! Skipping!!!'.format(f))
continue
if data is None or metadata is None:
raise ValueError('Data or metadata missing!')
data = np.reshape(data, (int(metadata['spectra']) + 1, int(metadata['wavelengths'])))
data = pd.DataFrame(data[1:], columns=data[0])
return data, metadata
def load_contest_test_dataset(path_to_data: Union[Path, str], min_block: int=0, max_block: int=-1) -> Tuple[pd.DataFrame, pd.Series]:
"""
Function for loading the contest test dataset.
Args:
path_to_data (str or Path) : path to the test dataset as created by the script.
min_block (int) : Allows for the selection of a specific block from the
original dataset. The function slices between <min_block>
and <max_block>.
max_block (int) : Allows for the selection of a specific block from the
original dataset. The function slices between <min_block>
and <max_block>.
Returns:
pd.DataFrame : X
pd.Series : y
"""
# TODO utilize a more abstract function for loading h5 data
# TODO add downloading
if isinstance(path_to_data, str):
path_to_data = Path(path_to_data)
test_data = np.ndarray((20000, 40002))
with h5py.File(path_to_data, 'r') as test_file:
wavelengths = train_file["Wavelengths"]["1"][:]
for i_block, block in tqdm(test_file["UNKNOWN"].items()[min_block:max_block]):
spectra = block[:].transpose()
for i_spec in range(10000):
test_data[(10000*(int(i_block)-1))+i_spec] = spectra[i_spec]
del spectra
test = pd.DataFrame(test_data, columns=wavelengths)
    labels = test.pop('label')
return test, labels
def load_contest_train_dataset(path_to_data: Union[Path, str], spectra_per_sample: int=100) -> Tuple[pd.DataFrame, pd.Series, pd.Series]:
"""
Function for loading the contest train dataset.
Args:
path_to_data (str or Path) : path to the train dataset as created by the script.
spectra_per_sample (int) : how many spectra will be taken from each sample.
Returns:
pd.DataFrame : X
pd.Series : y
pd.Series : list of sample labels for each entry. Can be used to connect each
entry to the file it originated from.
"""
if isinstance(path_to_data, str):
path_to_data = Path(path_to_data)
with h5py.File(path_to_data, 'r') as train_file:
# Store wavelengths (calibration)
wavelengths = pd.Series(train_file['Wavelengths']['1'])
wavelengths = wavelengths.round(2).drop(index=[40000, 40001])
# Store class labels
labels = pd.Series(train_file['Class']['1']).astype(int)
# Store spectra
samples_per_class = labels.value_counts(sort=False) // 500
spectra = np.empty(shape=(0, 40000))
samples = []
classes = []
lower_bound = 1
for i_class in tqdm(samples_per_class.keys()):
for i_sample in range(lower_bound, lower_bound + samples_per_class[i_class]):
sample = train_file["Spectra"][f"{i_sample:03d}"]
sample = np.transpose(sample[:40000, :spectra_per_sample])
spectra = np.concatenate([spectra, sample])
samples.extend(np.repeat(i_sample, spectra_per_sample))
classes.extend(np.repeat(i_class, spectra_per_sample))
lower_bound += samples_per_class[i_class]
samples = | pd.Series(samples) | pandas.Series |
#%%
# LOAD THE DATASETS
import pandas as pd
import numpy as np
from shapely.geometry import Point
import shapely as shp
import geopandas as gpd
from geopandas.array import points_from_xy
path = "merged1_listas.pkl"
df_merge1 = pd.read_pickle(path)
df_merge1.reset_index(inplace=True)
#%%
#region PART 1: FROM THE FIRST MERGE UP TO COMPLETING SPEAKERS
# Convert any remaining string-lists into real lists, in case some are left
def sep(x):
"""para pasar posibles string-listas a listas-listas"""
if (isinstance(x, str)) and ("[" in x):
return x.replace("'", "").strip("][").split(", ")
else:
return x
def sep_float(x):
"""para pasar posibles string-listas-float a listas-float"""
if (isinstance(x, str)) and ("[" in x):
lista = x.replace("'", "").strip("][").split(", ")
return [float(x_n) for x_n in lista]
elif isinstance(x,list):
return [float(x_n) for x_n in x]
else:
print(x)
return float(x)
# to load the points into geopandas as coordinates
def topoint(x):
if isinstance(x, list):
return [shp.wkt.loads(point) for point in x]
else:
return shp.wkt.loads(x)
#%%
df1 = df_merge1.applymap(sep)
print('Now the problematic columns only contain real lists \n\n#Lists per column:')
print(df1.applymap(lambda x: isinstance(x,list)).sum())
print('\nAnd the columns with numeric values no longer contain any strings \n\n#Strings per column:')
print(df1.applymap(lambda x: isinstance(x,str)).sum())
#%%
# MACROAREA: fill in by hand the 14 NaNs we found
macronan = df1[df1["Macroarea"].isnull()]
# Add the macroareas in a copy of the original dataframe
df2 = df1.copy()
macroareas_faltantes = [
"Papunesia",
"South America",
"Africa",
"Eurasia",
"Eurasia",
"Eurasia",
"Australia",
"Australia",
"North America",
"North America",
"Australia",
"Eurasia",
"Africa",
"Australia",
]
df2.loc[macronan.index, "Macroarea"] = macroareas_faltantes
#%%
# SIGN LANGUAGE: drop the corresponding rows
# rename the column because the spaces and the ñ cause confusion
df3 = df2.rename(columns={"Lenguaje de Señas": "senas"})
indexNames = df3[df3["senas"] == 1].index
df3.drop(indexNames, inplace=True)
df3 = df3.reset_index(drop=True)
#%%
# NUM_SPEAKER: after checking some entries in Ethnologue we concluded that
# the largest value appearing in the list is the total number of speakers.
# We keep only that element of each list.
# entries that are lists
list_bool = df3["num_speakers"].apply(
lambda x: isinstance(x, list)
)
# list of those lists
num_speaker_lists = df3["num_speakers"][list_bool]
# take the maximum with the lambda function and replace those values in the df
df3.loc[num_speaker_lists.index, "num_speakers"] = num_speaker_lists.apply(
lambda x: max(x)
)
#%% START BUILDING THE FEATURES
# New feature with the number of countries
# Number of elements of the list (or 1 if it is not a list)
def num_countries(x):
if isinstance(x,list):
return len(x)
else:
return 1
df3['cant_paises'] = df3['countryISO'].apply(lambda x: num_countries(x))
#%%
# Build a new feature describing how close languages are to one another
# The feature is how many languages lie within a certain distance
# of each language
# convert the string points into shapely Point objects
df4 = df3.copy()
df4['Country_coord'] = df4['Country_coord'].apply(topoint)
col_latitude = []
col_longitude = []
for i, item in df4['Country_coord'].items():
if isinstance(item,Point):
col_latitude.append(item.x)
col_longitude.append(item.y)
elif isinstance(item,list):
sublist_lat = [subitem.x for subitem in item]
sublist_long = [subitem.y for subitem in item]
col_latitude.append(sublist_lat)
col_longitude.append(sublist_long)
df4['Country_lat'] = col_latitude
df4['Country_long'] = col_longitude
#%%
# this part is commented out because it takes a while to run, just in case
# largo = len(df4)
# col_nearest = np.zeros(largo)
# print('Running the loop from euge to build the nearest-languages feature')
# for i in range(largo):
# print(i)
# if df4["cant_paises"][i] == 1:
# lat = df4["Latitude"][i]
# long = df4["Longitude"][i]
# cercanos = 0
# for j in range(largo):
# if j != i:
# if df4["cant_paises"][j] == 1:
# lat1 = df4["Latitude"][j]
# long1 = df4["Longitude"][j]
# dif_lat = lat1 - lat
# dif_long = long1 - long
# if (abs(dif_lat) < 10) and (abs(dif_long) < 10):
# cercanos = cercanos + 1
# else:
# for k in range(df4["cant_paises"][j]):
# lat1 = df4["Country_lat"][j][k]
# long1 = df4["Country_long"][j][k]
# dif_lat = lat1 - lat
# dif_long1 = long1 - long
# if (abs(dif_lat) < 10) and (abs(dif_long) < 10):
# cercanos = cercanos + 1
# else:
# cercanos = 0
# for m in range(df4["cant_paises"][i]):
# lat = df4["Country_lat"][i][m]
# long = df4["Country_long"][i][m]
# for j in range(largo):
# if j != i:
# if df4["cant_paises"][j] == 1:
# lat1 = df4["Latitude"][j]
# long1 = df4["Longitude"][j]
# dif_lat = lat1 - lat
# dif_long = long1 - long
# if (abs(dif_lat) < 10) and (abs(dif_long) < 10):
# cercanos = cercanos + 1
# else:
# for k in range(df4["cant_paises"][j]):
# lat1 = df4["Country_lat"][j][k]
# long1 = df4["Country_long"][j][k]
# dif_lat = lat1 - lat
# dif_long1 = long1 - long
# if (abs(dif_lat) < 10) and (abs(dif_long) < 10):
# cercanos = cercanos + 1
# col_nearest[i] = cercanos
#endregion
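# Hedged alternative: a vectorized sketch of the same "nearest languages" count.
# It assumes numpy is imported as np (as in the commented loop above) and that
# df4 already has the columns built earlier (cant_paises, Latitude, Longitude,
# Country_lat, Country_long). Unlike the commented loop (which counts close
# coordinate pairs, and compares dif_long where dif_long1 was computed), this
# counts how many distinct languages have at least one coordinate inside the
# 10-degree window. The helper is only defined here, not called.
def nearest_languages_count(df, window=10):
    rows = []
    for i in range(len(df)):
        if df['cant_paises'].iloc[i] == 1:
            rows.append((i, df['Latitude'].iloc[i], df['Longitude'].iloc[i]))
        else:
            for lat, long in zip(df['Country_lat'].iloc[i], df['Country_long'].iloc[i]):
                rows.append((i, lat, long))
    coords = pd.DataFrame(rows, columns=['lang', 'lat', 'long'])
    lat = coords['lat'].to_numpy(dtype=float)
    long = coords['long'].to_numpy(dtype=float)
    lang = coords['lang'].to_numpy()
    # pairwise closeness between every pair of coordinates (memory heavy for
    # very large datasets; chunking would be needed in that case)
    close = (np.abs(lat[:, None] - lat[None, :]) < window) & \
            (np.abs(long[:, None] - long[None, :]) < window)
    # ignore coordinates that belong to the same language
    close &= lang[:, None] != lang[None, :]
    counts = np.zeros(len(df), dtype=int)
    for i in range(len(df)):
        mask = lang == i
        if mask.any():
            counts[i] = len(np.unique(lang[close[mask].any(axis=0)]))
    return counts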
#%%
# df4.to_pickle('paso1.pkl')
# df4.to_csv('paso1.csv')
#%%
#region PART 2: ADD THE SPEAKERS OF EACH LANGUAGE TO THE MAIN DF
#df4 = pd.read_pickle('paso1.pkl')
#df_principal = df4.copy()
ingrid = pd.read_csv('/home/ingrid/Documents/labodatos/TP_final/df_principal/completados_INGRID.csv')
euge = pd.read_excel('/home/ingrid/Documents/labodatos/TP_final/df_principal/df_num_speakers_euge_arreglado_v2.xlsx')
mai = pd.read_csv('/home/ingrid/Documents/labodatos/TP_final/df_principal/completados_MAIA.csv')
romi = pd.read_csv('/home/ingrid/Documents/labodatos/TP_final/df_principal/romi_completos.csv')
#%%
# The merge1 dataframes had not gone through all the previous steps
# In one of those steps we dropped several rows, which messed up the indexing
# This fixes it:
df_principal = pd.read_pickle('paso1.pkl')
#!/usr/bin.env/python
# -*- coding: utf-8 -*-
"""
Gates are traditionally used to subset single cell data in one
or two dimensional space by hand-drawn polygons in a manual and laborious
process. cytopy attempts to emulate this using autonomous gates, driven
by unsupervised learning algorithms. The gate module contains the
classes that provide the infrastructure to apply these algorithms
to the context of single cell data whilst interacting with the underlying
database that houses our analysis.
Copyright 2020 <NAME>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import typing
from cytopy.flow.transform import apply_transform
from .geometry import ThresholdGeom, PolygonGeom, inside_polygon, \
create_convex_hull, create_polygon, ellipse_to_polygon, probablistic_ellipse
from .population import Population, merge_multiple_gate_populations
from ..flow.sampling import faithful_downsampling, density_dependent_downsampling, upsample_knn, uniform_downsampling
from ..flow.dim_reduction import dimensionality_reduction
from ..flow.build_models import build_sklearn_model
from sklearn.cluster import *
from sklearn.mixture import *
from hdbscan import HDBSCAN
from shapely.geometry import Polygon as ShapelyPoly
from shapely.ops import cascaded_union
from string import ascii_uppercase
from collections import Counter
from typing import List, Dict
from functools import reduce
from KDEpy import FFTKDE
from detecta import detect_peaks
from scipy.signal import savgol_filter
import pandas as pd
import numpy as np
import mongoengine
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, cytopy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class Child(mongoengine.EmbeddedDocument):
"""
Base class for a gate child population. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
"""
name = mongoengine.StringField()
signature = mongoengine.DictField()
meta = {"allow_inheritance": True}
class ChildThreshold(Child):
"""
Child population of a Threshold gate. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
Attributes
-----------
name: str
Name of the child
definition: str
Definition of population e.g "+" or "-" for 1 dimensional gate or "++" etc for 2 dimensional gate
geom: ThresholdGeom
Geometric definition for this child population
signature: dict
Average of a population feature space (median of each channel); used to match
children to newly identified populations for annotating
"""
definition = mongoengine.StringField()
geom = mongoengine.EmbeddedDocumentField(ThresholdGeom)
def match_definition(self,
definition: str):
"""
Given a definition, return True or False as to whether it matches this ChildThreshold's
definition. If definition contains multiples separated by a comma, or the ChildThreshold's
definition contains multiple, first split and then compare. Return True if matches any.
Parameters
----------
definition: str
Returns
-------
bool
"""
definition = definition.split(",")
return any([x in self.definition.split(",") for x in definition])
class ChildPolygon(Child):
"""
    Child population of a Polygon or Ellipse gate. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
Attributes
-----------
name: str
Name of the child
    geom: PolygonGeom
Geometric definition for this child population
signature: dict
Average of a population feature space (median of each channel); used to match
children to newly identified populations for annotating
"""
geom = mongoengine.EmbeddedDocumentField(PolygonGeom)
class Gate(mongoengine.Document):
"""
Base class for a Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
        key/value pair for desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for details. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
applying gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as any supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
ctrl_x: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the samples in an Experiment. When given this signals that the gate should use the control
data for the x-axis dimension when predicting population geometry.
ctrl_y: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the samples in an Experiment. When given this signals that the gate should use the control
data for the y-axis dimension when predicting population geometry.
ctrl_classifier: str (default='XGBClassifier')
Ignored if both ctrl_x and ctrl_y are None. Specifies which Scikit-Learn or sklearn-like classifier
to use when estimating the control population (see cytopy.data.fcs.FileGroup.load_ctrl_population_df)
ctrl_classifier_params: dict, optional
Parameters used when creating control population classifier
ctrl_prediction_kwargs: dict, optional
Additional keyword arguments passed to cytopy.data.fcs.FileGroup.load_ctrl_population_df call
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", "density",
"quantile" or correspond to the name of an existing class in Scikit-Learn or HDBSCAN.
If you have a method that follows the Scikit-Learn template but isn't currently present
        in cytopy and you would like it to be, please contribute to the repository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
gate_name = mongoengine.StringField(required=True)
parent = mongoengine.StringField(required=True)
x = mongoengine.StringField(required=True)
y = mongoengine.StringField(required=False)
transform_x = mongoengine.StringField(required=False, default=None)
transform_y = mongoengine.StringField(required=False, default=None)
transform_x_kwargs = mongoengine.DictField()
transform_y_kwargs = mongoengine.DictField()
sampling = mongoengine.DictField()
dim_reduction = mongoengine.DictField()
ctrl_x = mongoengine.StringField()
ctrl_y = mongoengine.StringField()
ctrl_classifier = mongoengine.StringField(default="XGBClassifier")
ctrl_classifier_params = mongoengine.DictField()
ctrl_prediction_kwargs = mongoengine.DictField()
method = mongoengine.StringField(required=True)
method_kwargs = mongoengine.DictField()
children = mongoengine.EmbeddedDocumentListField(Child)
meta = {
'db_alias': 'core',
'collection': 'gates',
'allow_inheritance': True
}
def __init__(self, *args, **values):
method = values.get("method", None)
assert method is not None, "No method given"
err = f"Module {method} not supported. See docs for supported methods."
assert method in ["manual", "density", "quantile", "time", "AND", "OR", "NOT"] + list(globals().keys()), err
super().__init__(*args, **values)
self.model = None
self.x_transformer = None
self.y_transformer = None
if self.ctrl_classifier:
params = self.ctrl_classifier_params or {}
build_sklearn_model(klass=self.ctrl_classifier, **params)
self.validate()
def transform(self,
data: pd.DataFrame) -> pd.DataFrame:
"""
Transform dataframe prior to gating
Parameters
----------
data: Pandas.DataFrame
Returns
-------
Pandas.DataFrame
Transformed dataframe
"""
if self.transform_x is not None:
kwargs = self.transform_x_kwargs or {}
data, self.x_transformer = apply_transform(data=data,
features=[self.x],
method=self.transform_x,
return_transformer=True,
**kwargs)
if self.transform_y is not None and self.y is not None:
kwargs = self.transform_y_kwargs or {}
data, self.y_transformer = apply_transform(data=data,
features=[self.y],
method=self.transform_y,
return_transformer=True,
**kwargs)
return data
def transform_info(self) -> (dict, dict):
"""
Returns two dictionaries describing the transforms and transform settings applied to each variable
this gate acts upon
Returns
-------
dict, dict
Transform dict ({x-variable: transform, y-variable: transform}),
Transform kwargs dict ({x-variable: transform kwargs, y-variable: transform kwargs})
"""
transforms = [self.transform_x, self.transform_y]
transform_kwargs = [self.transform_x_kwargs, self.transform_y_kwargs]
transforms = {k: v for k, v in zip([self.x, self.y], transforms) if k is not None}
transform_kwargs = {k: v for k, v in zip([self.x, self.y], transform_kwargs) if k is not None}
return transforms, transform_kwargs
def _downsample(self,
data: pd.DataFrame) -> pd.DataFrame or None:
"""
Perform down-sampling prior to gating. Returns down-sampled dataframe or
None if sampling method is undefined.
Parameters
----------
data: Pandas.DataFrame
Returns
-------
Pandas.DataFrame or None
Raises
------
AssertionError
If sampling kwargs are missing
"""
data = data.copy()
if self.sampling.get("method", None) == "uniform":
n = self.sampling.get("n", None) or self.sampling.get("frac", None)
assert n is not None, "Must provide 'n' or 'frac' for uniform downsampling"
return uniform_downsampling(data=data, sample_size=n)
if self.sampling.get("method", None) == "density":
kwargs = {k: v for k, v in self.sampling.items()
if k not in ["method", "features"]}
features = [f for f in [self.x, self.y] if f is not None]
return density_dependent_downsampling(data=data,
features=features,
**kwargs)
if self.sampling.get("method", None) == "faithful":
h = self.sampling.get("h", 0.01)
return faithful_downsampling(data=data.values, h=h)
raise ValueError("Invalid downsample method, should be one of: 'uniform', 'density' or 'faithful'")
def _upsample(self,
data: pd.DataFrame,
sample: pd.DataFrame,
populations: List[Population]) -> List[Population]:
"""
Perform up-sampling after gating using KNN. Returns list of Population objects
with index updated to reflect the original data.
Parameters
----------
data: Pandas.DataFrame
Original data, prior to down-sampling
sample: Pandas.DataFrame
Sampled data
populations: list
List of populations with assigned indexes
Returns
-------
list
"""
sample = sample.copy()
sample["label"] = None
for i, p in enumerate(populations):
sample.loc[sample.index.isin(p.index), "label"] = i
sample["label"].fillna(-1, inplace=True)
labels = sample["label"].values
sample.drop("label", axis=1, inplace=True)
new_labels = upsample_knn(sample=sample,
original_data=data,
labels=labels,
features=[i for i in [self.x, self.y] if i is not None],
verbose=self.sampling.get("verbose", True),
scoring=self.sampling.get("upsample_scoring", "balanced_accuracy"),
**self.sampling.get("knn_kwargs", {}))
for i, p in enumerate(populations):
new_idx = data.index.values[np.where(new_labels == i)]
if len(new_idx) == 0:
raise ValueError(f"Up-sampling failed, no events labelled for {p.population_name}")
p.index = new_idx
return populations
def _dim_reduction(self,
data: pd.DataFrame):
"""
Experimental!
Perform dimension reduction prior to gating. Returns dataframe
with appended columns for embeddings
Parameters
----------
data: Pandas.DataFrame
Data to reduce
Returns
-------
Pandas.DataFrame
"""
method = self.dim_reduction.get("method", None)
if method is None:
return data
kwargs = {k: v for k, v in self.dim_reduction.items() if k != "method"}
data = dimensionality_reduction(data=data,
features=kwargs.get("features", data.columns.tolist()),
method=method,
n_components=2,
return_embeddings_only=False,
return_reducer=False,
**kwargs)
self.x = f"{method}1"
self.y = f"{method}2"
return data
def _xy_in_dataframe(self,
data: pd.DataFrame):
"""
Assert that the x and y variables defined for this gate are present in the given
DataFrames columns
Parameters
----------
data: Pandas.DataFrame
Returns
-------
None
Raises
-------
AssertionError
If required columns missing from provided data
"""
assert self.x in data.columns, f"{self.x} missing from given dataframe"
if self.y:
assert self.y in data.columns, f"{self.y} missing from given dataframe"
def reset_gate(self) -> None:
"""
Removes existing children and resets all parameters.
Returns
-------
None
"""
self.children = []
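# Illustrative sketch (not part of the cytopy API): the shape of the option
# dictionaries described in the Gate docstring above. The particular values
# (sample size, the UMAP method name and its settings) are assumptions, not
# recommendations.
def _example_gate_options():
    # 'uniform' sampling expects 'n' or 'frac'; 'density' and 'faithful'
    # accept the additional keyword arguments of their respective functions
    # in cytopy.flow.sampling
    sampling = {"method": "uniform", "n": 10000}
    # any supported method in cytopy.flow.dim_reduction; extra keys are
    # passed on to the reducer itself
    dim_reduction = {"method": "UMAP", "n_neighbors": 15}
    return sampling, dim_reduction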
class ThresholdGate(Gate):
"""
ThresholdGate inherits from Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The ThresholdGate subsets data based on the properties of the estimated probability
density function of the underlying data. For each axis, kernel density estimation
(KDEpy.FFTKDE) is used to estimate the PDF and a straight line "threshold" applied
to the region of minimum density to separate populations.
This is achieved using a peak finding algorithm and a smoothing procedure, until either:
    * Two predominant "peaks" are found and the threshold is taken as the local minimum
      between these peaks
* A single peak is detected and the threshold is applied as either the quantile
given in method_kwargs or the inflection point on the descending curve.
Alternatively the "method" can be "manual" for a static gate to be applied; user should
provide x_threshold and y_threshold (if two-dimensional) to "method_kwargs", or "method"
can be "quantile", where the threshold will be drawn at the given quantile, defined by
"q" in "method_kwargs".
Additional kwargs to control behaviour of ThresholdGate when method is "density"
can be given in method_kwargs:
    * kernel (default="gaussian") - kernel used for KDE calculation
      (see KDEpy.FFTKDE for available kernels)
* bw (default="silverman") - bandwidth to use for KDE calculation, can either be
"silverman" or "ISJ" or a float value (see KDEpy)
* min_peak_threshold (default=0.05) - percentage of highest recorded peak below
which peaks are ignored. E.g. 0.05 would mean any peak less than 5% of the
highest peak would be ignored.
* peak_boundary (default=0.1) - bounding window around which only the highest peak
is considered. E.g. 0.1 would mean that peaks are assessed within a window the
size of peak_boundary * length of probability vector and only highest peak within
window is kept.
* inflection_point_kwargs - dictionary; see cytopy.data.gate.find_inflection_point
* smoothed_peak_finding_kwargs - dictionary; see cytopy.data.gate.smoothed_peak_finding
ThresholdGate supports control gating, whereby thresholds are fitted to control data
and then applied to primary data.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
        key/value pair for desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for details. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
applying gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as any supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
ctrl_x: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the samples in an Experiment. When given this signals that the gate should use the control
data for the x-axis dimension when predicting population geometry.
ctrl_y: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the samples in an Experiment. When given this signals that the gate should use the control
data for the y-axis dimension when predicting population geometry.
ctrl_classifier: str (default='XGBClassifier')
Ignored if both ctrl_x and ctrl_y are None. Specifies which Scikit-Learn or sklearn-like classifier
to use when estimating the control population (see cytopy.data.fcs.FileGroup.load_ctrl_population_df)
ctrl_classifier_params: dict, optional
Parameters used when creating control population classifier
ctrl_prediction_kwargs: dict, optional
Additional keyword arguments passed to cytopy.data.fcs.FileGroup.load_ctrl_population_df call
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", "density",
or "quantile"
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildThreshold)
def add_child(self,
child: ChildThreshold) -> None:
"""
Add a new child for this gate. Checks that definition is valid and overwrites geom with gate information.
Parameters
----------
child: ChildThreshold
Returns
-------
None
Raises
------
AssertionError
If invalid definition
"""
if self.y is not None:
definition = child.definition.split(",")
assert all(i in ["++", "+-", "-+", "--"]
for i in definition), "Invalid child definition, should be one of: '++', '+-', '-+', or '--'"
else:
assert child.definition in ["+", "-"], "Invalid child definition, should be either '+' or '-'"
child.geom.x = self.x
child.geom.y = self.y
child.geom.transform_x, child.geom.transform_y = self.transform_x, self.transform_y
child.geom.transform_x_kwargs = self.transform_x_kwargs
child.geom.transform_y_kwargs = self.transform_y_kwargs
self.children.append(child)
def _duplicate_children(self) -> None:
"""
Loop through the children and merge any with the same name.
Returns
-------
None
"""
child_counts = Counter([c.name for c in self.children])
if all([i == 1 for i in child_counts.values()]):
return
updated_children = []
for name, count in child_counts.items():
if count >= 2:
updated_children.append(merge_children([c for c in self.children if c.name == name]))
else:
updated_children.append([c for c in self.children if c.name == name][0])
self.children = updated_children
def label_children(self,
labels: dict) -> None:
"""
Rename children using a dictionary of labels where the key correspond to the existing child name
and the value is the new desired population name. If the same population name is given to multiple
children, these children will be merged.
        Note: the given mapping should contain an entry for every existing child.
Parameters
----------
labels: dict
Mapping for new children name
Returns
-------
None
"""
for c in self.children:
c.name = labels.get(c.name)
self._duplicate_children()
def _match_to_children(self,
new_populations: List[Population]) -> List[Population]:
"""
        Given a list of newly created Populations, match the Populations to the gate's children and
return list of Populations with correct population names.
Parameters
----------
new_populations: list
List of newly created Population objects
Returns
-------
List
"""
labeled = list()
for c in self.children:
matching_populations = [p for p in new_populations if c.match_definition(p.definition)]
if len(matching_populations) == 0:
continue
elif len(matching_populations) > 1:
pop = merge_multiple_gate_populations(matching_populations, new_population_name=c.name)
else:
pop = matching_populations[0]
pop.population_name = c.name
labeled.append(pop)
return labeled
def _quantile_gate(self,
data: pd.DataFrame) -> list:
"""
Fit gate to the given dataframe by simply drawing the threshold at the desired quantile.
Parameters
----------
data: Pandas.DataFrame
Returns
-------
list
List of thresholds (one for each dimension)
Raises
------
AssertionError
            If 'q' argument not found in method kwargs and method is 'quantile'
"""
q = self.method_kwargs.get("q", None)
assert q is not None, "Must provide a value for 'q' in method kwargs when using quantile gate"
if self.y is None:
return [data[self.x].quantile(q)]
return [data[self.x].quantile(q), data[self.y].quantile(q)]
def _process_one_peak(self,
x: np.ndarray,
x_grid: np.array,
p: np.array,
peak_idx: int):
"""
Process the results of a single peak detected. Returns the threshold for
the given dimension.
Parameters
----------
        x: numpy.ndarray
            One dimensional data (the feature under investigation)
        x_grid: numpy.ndarray
            x grid upon which probability vector is estimated by KDE
        p: numpy.ndarray
            probability vector as estimated by KDE
        peak_idx: int
            Index of the detected peak
Returns
-------
float
Raises
------
AssertionError
            If 'q' argument not found in method kwargs and use_inflection_point is False
"""
use_inflection_point = self.method_kwargs.get("use_inflection_point", True)
if not use_inflection_point:
q = self.method_kwargs.get("q", None)
assert q is not None, "Must provide a value for 'q' in method kwargs " \
"for desired quantile if use_inflection_point is False"
return np.quantile(x, q)
inflection_point_kwargs = self.method_kwargs.get("inflection_point_kwargs", {})
return find_inflection_point(x=x_grid,
p=p,
peak_idx=peak_idx,
**inflection_point_kwargs)
def _fit(self,
data: pd.DataFrame or dict) -> list:
"""
        Internal method to fit threshold density gating to a given dataframe. Returns the
        list of thresholds generated, one per dimension (thresholds are computed on the
        downsampled dataframe if a sampling method is defined).
Parameters
----------
data: Pandas.DataFrame
Returns
-------
List
"""
if self.method == "manual":
return self._manual()
self._xy_in_dataframe(data=data)
dims = [i for i in [self.x, self.y] if i is not None]
if self.sampling.get("method", None) is not None:
data = self._downsample(data=data)
if self.method == "quantile":
thresholds = self._quantile_gate(data=data)
else:
thresholds = list()
for d in dims:
thresholds.append(self._find_threshold(data[d].values))
return thresholds
def _find_threshold(self, x: np.ndarray):
"""
Given a single dimension of data find the threshold point according to the
methodology defined for this gate and the number of peaks detected.
Parameters
----------
x: Numpy Array
Returns
-------
float
Raises
------
AssertionError
If no peaks are detected
"""
peaks, x_grid, p = self._density_peak_finding(x)
assert len(peaks) > 0, "No peaks detected"
if len(peaks) == 1:
threshold = self._process_one_peak(x,
x_grid=x_grid,
p=p,
peak_idx=peaks[0])
elif len(peaks) == 2:
threshold = find_local_minima(p=p, x=x_grid, peaks=peaks)
else:
threshold = self._solve_threshold_for_multiple_peaks(x=x, p=p, x_grid=x_grid)
return threshold
def _solve_threshold_for_multiple_peaks(self,
x: np.ndarray,
p: np.ndarray,
x_grid: np.ndarray):
"""
Handle the detection of > 2 peaks by smoothing the estimated PDF and
rerunning the peak finding algorithm
Parameters
----------
        x: Numpy Array
            One dimensional data (the feature under investigation)
        p: Numpy Array
            Estimated PDF (probability vector)
x_grid: Numpy Array
Grid space PDF was generated in
Returns
-------
float
"""
smoothed_peak_finding_kwargs = self.method_kwargs.get("smoothed_peak_finding_kwargs", {})
smoothed_peak_finding_kwargs["min_peak_threshold"] = smoothed_peak_finding_kwargs.get(
"min_peak_threshold",
self.method_kwargs.get("min_peak_threshold", 0.05))
smoothed_peak_finding_kwargs["peak_boundary"] = smoothed_peak_finding_kwargs.get("peak_boundary",
self.method_kwargs.get(
"peak_boundary",
0.1))
p, peaks = smoothed_peak_finding(p=p, **smoothed_peak_finding_kwargs)
if len(peaks) == 1:
return self._process_one_peak(x,
x_grid=x_grid,
p=p,
peak_idx=peaks[0])
else:
return find_local_minima(p=p, x=x_grid, peaks=peaks)
def _density_peak_finding(self,
x: np.ndarray):
"""
Estimate the underlying PDF of a single dimension using a convolution based
KDE (KDEpy.FFTKDE), then run a peak finding algorithm (detecta.detect_peaks)
Parameters
----------
x: Numpy Array
Returns
-------
(Numpy Array, Numpy Array, Numpy Array)
Index of detected peaks, grid space that PDF is estimated on, and estimated PDF
"""
x_grid, p = (FFTKDE(kernel=self.method_kwargs.get("kernel", "gaussian"),
bw=self.method_kwargs.get("bw", "silverman"))
.fit(x)
.evaluate())
peaks = find_peaks(p=p,
min_peak_threshold=self.method_kwargs.get("min_peak_threshold", 0.05),
peak_boundary=self.method_kwargs.get("peak_boundary", 0.1))
return peaks, x_grid, p
def _manual(self) -> list:
"""
Wrapper called if manual gating method. Searches the method kwargs and returns static thresholds
Returns
-------
List
Raises
------
AssertionError
If x or y threshold is None when required
"""
x_threshold = self.method_kwargs.get("x_threshold", None)
y_threshold = self.method_kwargs.get("y_threshold", None)
assert x_threshold is not None, "Manual threshold gating requires the keyword argument 'x_threshold'"
if self.transform_x:
kwargs = self.transform_x_kwargs or {}
x_threshold = apply_transform(pd.DataFrame({"x": [x_threshold]}),
features=["x"],
method=self.transform_x,
**kwargs).x.values[0]
if self.y:
assert y_threshold is not None, "2D manual threshold gating requires the keyword argument 'y_threshold'"
if self.transform_y:
kwargs = self.transform_y_kwargs or {}
y_threshold = apply_transform(pd.DataFrame({"y": [y_threshold]}),
features=["y"],
method=self.transform_y,
**kwargs).y.values[0]
thresholds = [i for i in [x_threshold, y_threshold] if i is not None]
return [float(i) for i in thresholds]
def _ctrl_fit(self,
primary_data: pd.DataFrame,
ctrl_data: pd.DataFrame):
"""
        Estimate the thresholds to apply to some primary data using the given control data
Parameters
----------
primary_data: Pandas.DataFrame
ctrl_data: Pandas.DataFrame
Returns
-------
List
List of thresholds [x dimension threshold, y dimension threshold]
"""
self._xy_in_dataframe(data=primary_data)
self._xy_in_dataframe(data=ctrl_data)
ctrl_data = self.transform(data=ctrl_data)
ctrl_data = self._dim_reduction(data=ctrl_data)
dims = [i for i in [self.x, self.y] if i is not None]
if self.sampling.get("method", None) is not None:
primary_data, ctrl_data = self._downsample(data=primary_data), self._downsample(data=ctrl_data)
thresholds = list()
for d in dims:
fmo_threshold = self._find_threshold(ctrl_data[d].values)
peaks, x_grid, p = self._density_peak_finding(primary_data[d].values)
if len(peaks) == 1:
thresholds.append(fmo_threshold)
else:
if len(peaks) > 2:
t = self._solve_threshold_for_multiple_peaks(x=primary_data[d].values,
p=p,
x_grid=x_grid)
else:
t = find_local_minima(p=p, x=x_grid, peaks=peaks)
if t > fmo_threshold:
thresholds.append(t)
else:
thresholds.append(fmo_threshold)
return thresholds
def fit(self,
data: pd.DataFrame,
ctrl_data: pd.DataFrame or None = None) -> None:
"""
Fit the gate using a given dataframe. If children already exist will raise an AssertionError
and notify user to call `fit_predict`.
Parameters
----------
data: Pandas.DataFrame
Population data to fit threshold
ctrl_data: Pandas.DataFrame, optional
If provided, thresholds will be calculated using ctrl_data and then applied to data
Returns
-------
None
Raises
------
AssertionError
If gate Children have already been defined i.e. fit has been called previously
"""
data = data.copy()
data = self.transform(data=data)
data = self._dim_reduction(data=data)
assert len(self.children) == 0, "Children already defined for this gate. Call 'fit_predict' to " \
"fit to new data and match populations to children, or call " \
"'predict' to apply static thresholds to new data. If you want to " \
"reset the gate and call 'fit' again, first call 'reset_gate'"
if ctrl_data is not None:
thresholds = self._ctrl_fit(primary_data=data, ctrl_data=ctrl_data)
else:
thresholds = self._fit(data=data)
y_threshold = None
if len(thresholds) > 1:
y_threshold = thresholds[1]
data = apply_threshold(data=data,
x=self.x, x_threshold=thresholds[0],
y=self.y, y_threshold=y_threshold)
for definition, df in data.items():
self.add_child(ChildThreshold(name=definition,
definition=definition,
geom=ThresholdGeom(x_threshold=thresholds[0],
y_threshold=y_threshold)))
return None
def fit_predict(self,
data: pd.DataFrame,
ctrl_data: pd.DataFrame or None = None) -> list:
"""
Fit the gate using a given dataframe and then associate predicted Population objects to
existing children. If no children exist, an AssertionError will be raised prompting the
user to call `fit` method.
Parameters
----------
data: Pandas.DataFrame
Population data to fit threshold to
ctrl_data: Pandas.DataFrame, optional
If provided, thresholds will be calculated using ctrl_data and then applied to data
Returns
-------
List
List of predicted Population objects, labelled according to the gates child objects
Raises
------
AssertionError
If fit has not been called prior to fit_predict
"""
assert len(self.children) > 0, "No children defined for gate, call 'fit' before calling 'fit_predict'"
data = data.copy()
data = self.transform(data=data)
data = self._dim_reduction(data=data)
if ctrl_data is not None:
thresholds = self._ctrl_fit(primary_data=data, ctrl_data=ctrl_data)
else:
thresholds = self._fit(data=data)
y_threshold = None
if len(thresholds) == 2:
y_threshold = thresholds[1]
results = apply_threshold(data=data,
x=self.x,
y=self.y,
x_threshold=thresholds[0],
y_threshold=y_threshold)
pops = self._generate_populations(data=results,
x_threshold=thresholds[0],
y_threshold=y_threshold)
return self._match_to_children(new_populations=pops)
def predict(self,
data: pd.DataFrame) -> list:
"""
Using existing children associated to this gate, the previously calculated thresholds of
these children will be applied to the given data and then Population objects created and
labelled to match the children of this gate. NOTE: the data will not be fitted and thresholds
applied will be STATIC not data driven. For data driven gates call `fit_predict` method.
Parameters
----------
data: Pandas.DataFrame
            Data to apply static thresholds to
Returns
-------
List
List of Population objects
Raises
------
AssertionError
If fit has not been called prior to predict
"""
assert len(self.children) > 0, "Must call 'fit' prior to predict"
self._xy_in_dataframe(data=data)
data = self.transform(data=data)
data = self._dim_reduction(data=data)
if self.y is not None:
data = threshold_2d(data=data,
x=self.x,
y=self.y,
x_threshold=self.children[0].geom.x_threshold,
y_threshold=self.children[0].geom.y_threshold)
else:
data = threshold_1d(data=data, x=self.x, x_threshold=self.children[0].geom.x_threshold)
return self._generate_populations(data=data,
x_threshold=self.children[0].geom.x_threshold,
y_threshold=self.children[0].geom.y_threshold)
def _generate_populations(self,
data: dict,
x_threshold: float,
y_threshold: float or None) -> list:
"""
Generate populations from a standard dictionary of dataframes that have had thresholds applied.
Parameters
----------
        data: dict
x_threshold: float
y_threshold: float (optional)
Returns
-------
List
List of Population objects
"""
pops = list()
for definition, df in data.items():
pops.append(Population(population_name=definition,
definition=definition,
parent=self.parent,
n=df.shape[0],
source="gate",
index=df.index.values,
signature=df.mean().to_dict(),
geom=ThresholdGeom(x=self.x,
y=self.y,
transform_x=self.transform_x,
transform_y=self.transform_y,
transform_x_kwargs=self.transform_x_kwargs,
transform_y_kwargs=self.transform_y_kwargs,
x_threshold=x_threshold,
y_threshold=y_threshold)))
return pops
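# Illustrative sketch (not part of the cytopy API): how a density-driven
# ThresholdGate might be constructed and used. The gate, parent and channel
# names and all keyword values below are assumptions for demonstration only.
def _example_threshold_gate(training_data: pd.DataFrame,
                            new_data: pd.DataFrame) -> list:
    gate = ThresholdGate(gate_name="cd3_cd45_gate",
                         parent="live_cells",
                         x="CD3",
                         y="CD45",
                         transform_x="logicle",
                         transform_y="logicle",
                         method="density",
                         method_kwargs={"kernel": "gaussian",
                                        "bw": "silverman",
                                        "min_peak_threshold": 0.05,
                                        "peak_boundary": 0.1})
    gate.fit(data=training_data)  # children are created, named by definition
    gate.label_children({"++": "CD3+CD45+", "+-": "CD3+CD45-",
                         "-+": "CD3-CD45+", "--": "CD3-CD45-"})
    # new data is fitted and the resulting populations matched to the children
    return gate.fit_predict(data=new_data)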
class PolygonGate(Gate):
"""
PolygonGate inherits from Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The PolygonGate subsets data based on the results of an unsupervised learning algorithm
such a clustering algorithm. PolygonGate supports any clustering algorithm from the
Scikit-Learn machine learning library. Support is extended to any clustering library
that follows the Scikit-Learn template, but currently this only includes HDBSCAN.
Contributions to extend to other libraries are welcome. The name of the class to use
should be provided in "method" along with keyword arguments for initiating this class
in "method_kwargs".
Alternatively the "method" can be "manual" for a static gate to be applied; user should
provide x_values and y_values (if two-dimensional) to "method_kwargs" as two arrays,
this will be interpreted as the x and y coordinates of the polygon to fit to the data.
DOES NOT SUPPORT CONTROL GATING.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
        key/value pair for desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for details. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
applying gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as any supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", or correspond
to the name of an existing class in Scikit-Learn or HDBSCAN.
If you have a method that follows the Scikit-Learn template but isn't currently present
        in cytopy and you would like it to be, please contribute to the repository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildPolygon)
def __init__(self, *args, **values):
super().__init__(*args, **values)
assert self.y is not None, "Polygon gate expects a y-axis variable"
def _generate_populations(self,
data: pd.DataFrame,
polygons: List[ShapelyPoly]) -> List[Population]:
"""
Given a dataframe and a list of Polygon shapes as generated from the '_fit' method, generate a
list of Population objects.
Parameters
----------
data: Pandas.DataFrame
polygons: list
Returns
-------
List
List of Population objects
"""
pops = list()
for name, poly in zip(ascii_uppercase, polygons):
pop_df = inside_polygon(df=data, x=self.x, y=self.y, poly=poly)
geom = PolygonGeom(x=self.x,
y=self.y,
transform_x=self.transform_x,
transform_y=self.transform_y,
transform_x_kwargs=self.transform_x_kwargs,
transform_y_kwargs=self.transform_y_kwargs,
x_values=poly.exterior.xy[0],
y_values=poly.exterior.xy[1])
pops.append(Population(population_name=name,
source="gate",
parent=self.parent,
n=pop_df.shape[0],
signature=pop_df.mean().to_dict(),
geom=geom,
index=pop_df.index.values))
return pops
def label_children(self,
labels: dict,
drop: bool = True) -> None:
"""
Rename children using a dictionary of labels where the key correspond to the existing child name
and the value is the new desired population name. If the same population name is given to multiple
children, these children will be merged.
If drop is True, then children that are absent from the given dictionary will be dropped.
Parameters
----------
labels: dict
Mapping for new children name
drop: bool (default=True)
If True, children absent from labels will be dropped
Returns
-------
None
Raises
------
AssertionError
If duplicate labels are provided
"""
assert len(set(labels.values())) == len(labels.values()), \
"Duplicate labels provided. Child merging not available for polygon gates"
if drop:
self.children = [c for c in self.children if c.name in labels.keys()]
for c in self.children:
c.name = labels.get(c.name)
def add_child(self,
child: ChildPolygon) -> None:
"""
Add a new child for this gate. Checks that child is valid and overwrites geom with gate information.
Parameters
----------
child: ChildPolygon
Returns
-------
None
Raises
------
TypeError
x_values or y_values is not type list
"""
child.geom.x = self.x
child.geom.y = self.y
child.geom.transform_x = self.transform_x
child.geom.transform_y = self.transform_y
child.geom.transform_x_kwargs = self.transform_x_kwargs
child.geom.transform_y_kwargs = self.transform_y_kwargs
if not isinstance(child.geom.x_values, list):
raise TypeError("ChildPolygon x_values should be of type list")
if not isinstance(child.geom.y_values, list):
raise TypeError("ChildPolygon y_values should be of type list")
self.children.append(child)
def _match_to_children(self,
new_populations: List[Population]) -> List[Population]:
"""
        Given a list of newly created Populations, match the Populations to the gate's children and
return list of Populations with correct population names. Populations are matched to children
based on minimising the hausdorff distance between the set of polygon coordinates defining
the gate as it was originally created and the newly generated gate fitted to new data.
Parameters
-----------
new_populations: list
List of newly created Population objects
Returns
-------
List
"""
matched_populations = list()
for child in self.children:
hausdorff_distances = [child.geom.shape.hausdorff_distance(pop.geom.shape)
for pop in new_populations]
matching_population = new_populations[int(np.argmin(hausdorff_distances))]
matching_population.population_name = child.name
matched_populations.append(matching_population)
return matched_populations
def _manual(self) -> ShapelyPoly:
"""
Wrapper for manual polygon gating. Searches method kwargs for x and y coordinates and returns
polygon.
Returns
-------
Shapely.geometry.Polygon
Raises
------
AssertionError
x_values or y_values missing from method kwargs
"""
x_values, y_values = self.method_kwargs.get("x_values", None), self.method_kwargs.get("y_values", None)
assert x_values is not None and y_values is not None, "For manual polygon gate must provide x_values and " \
"y_values"
if self.transform_x:
kwargs = self.transform_x_kwargs or {}
x_values = apply_transform(pd.DataFrame({"x": x_values}),
features="x",
method=self.transform_x, **kwargs).x.values
if self.transform_y:
kwargs = self.transform_y_kwargs or {}
y_values = apply_transform(pd.DataFrame({"y": y_values}),
features="y",
method=self.transform_y, **kwargs).y.values
return create_polygon(x_values, y_values)
def _fit(self,
data: pd.DataFrame) -> List[ShapelyPoly]:
"""
Internal method for fitting gate to the given data and returning geometric polygons for
captured populations.
Parameters
----------
data: Pandas.DataFrame
Returns
-------
List
List of Shapely polygon's
"""
if self.method == "manual":
return [self._manual()]
kwargs = {k: v for k, v in self.method_kwargs.items() if k != "conf"}
self.model = globals()[self.method](**kwargs)
self._xy_in_dataframe(data=data)
if self.sampling.get("method", None) is not None:
data = self._downsample(data=data)
labels = self.model.fit_predict(data[[self.x, self.y]])
hulls = [create_convex_hull(x_values=data.iloc[np.where(labels == i)][self.x].values,
y_values=data.iloc[np.where(labels == i)][self.y].values)
for i in np.unique(labels)]
hulls = [x for x in hulls if len(x[0]) > 0]
return [create_polygon(*x) for x in hulls]
def fit(self,
data: pd.DataFrame,
ctrl_data: None = None) -> None:
"""
Fit the gate using a given dataframe. This will generate new children using the calculated
polygons. If children already exist will raise an AssertionError and notify user to call
`fit_predict`.
Parameters
----------
data: Pandas.DataFrame
Population data to fit gate to
ctrl_data: None
Redundant parameter, necessary for Gate signature. Ignore.
Returns
-------
None
Raises
------
AssertionError
If Children have already been defined i.e. fit has been called previously without calling
'reset_gate'
"""
assert len(self.children) == 0, "Gate is already defined, call 'reset_gate' to clear children"
data = self.transform(data=data)
data = self._dim_reduction(data=data)
polygons = self._fit(data=data)
for name, poly in zip(ascii_uppercase, polygons):
self.add_child(ChildPolygon(name=name,
geom=PolygonGeom(x_values=poly.exterior.xy[0].tolist(),
y_values=poly.exterior.xy[1].tolist())))
def fit_predict(self,
data: pd.DataFrame,
ctrl_data: None = None) -> List[Population]:
"""
Fit the gate using a given dataframe and then associate predicted Population objects to
existing children. If no children exist, an AssertionError will be raised prompting the
user to call 'fit' method.
Parameters
----------
data: Pandas.DataFrame
Population data to fit gate to
ctrl_data: None
Redundant parameter, necessary for Gate signature. Ignore.
Returns
-------
List
List of predicted Population objects, labelled according to the gates child objects
Raises
------
AssertionError
If fit has not been previously called
"""
assert len(self.children) > 0, "No children defined for gate, call 'fit' before calling 'fit_predict'"
data = self.transform(data=data)
data = self._dim_reduction(data=data)
return self._match_to_children(self._generate_populations(data=data.copy(),
polygons=self._fit(data=data)))
def predict(self,
data: pd.DataFrame) -> List[Population]:
"""
Using existing children associated to this gate, the previously calculated polygons of
these children will be applied to the given data and then Population objects created and
labelled to match the children of this gate. NOTE: the data will not be fitted and polygons
applied will be STATIC not data driven. For data driven gates call `fit_predict` method.
Parameters
----------
data: Pandas.DataFrame
Data to apply static polygons to
Returns
-------
List
List of Population objects
Raises
------
AssertionError
If fit has not been previously called
"""
data = self.transform(data=data)
data = self._dim_reduction(data=data)
polygons = [create_polygon(c.geom.x_values, c.geom.y_values) for c in self.children]
populations = self._generate_populations(data=data, polygons=polygons)
for p, name in zip(populations, [c.name for c in self.children]):
p.population_name = name
return populations
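# Illustrative sketch (not part of the cytopy API): a PolygonGate driven by a
# Scikit-Learn clustering algorithm. The gate, parent and channel names, the
# MiniBatchKMeans settings and the child labels are assumptions only.
def _example_polygon_gate(training_data: pd.DataFrame,
                          new_data: pd.DataFrame) -> list:
    gate = PolygonGate(gate_name="scatter_gate",
                       parent="single_cells",
                       x="FSC-A",
                       y="SSC-A",
                       method="MiniBatchKMeans",
                       method_kwargs={"n_clusters": 3, "batch_size": 1000},
                       sampling={"method": "uniform", "n": 5000})
    gate.fit(data=training_data)  # children are named 'A', 'B', 'C', ...
    gate.label_children({"A": "lymphocytes", "B": "monocytes", "C": "debris"})
    return gate.fit_predict(data=new_data)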
class EllipseGate(PolygonGate):
"""
EllipseGate inherits from PolygonGate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The EllipseGate uses probabilistic mixture models to subset data into "populations". For
each component of the mixture model the covariance matrix is used to generate a confidence
ellipse, surrounding data and emulating a gate. EllipseGate can use any of the methods
from the Scikit-Learn mixture module. Keyword arguments for the initiation of a class
from this module can be given in "method_kwargs".
DOES NOT SUPPORT CONTROL GATING.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
        key/value pair for desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for details. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
applying gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as any supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", or correspond
        to the name of an existing class in the Scikit-Learn mixture module.
If you have a method that follows the Scikit-Learn template but isn't currently present
in cytopy and you would like it to be, please contribute to the repository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildPolygon)
def __init__(self, *args, **values):
method = values.get("method", None)
method_kwargs = values.get("method_kwargs", {})
        assert method_kwargs.get("covariance_type", "full") == "full", \
            "EllipseGate only supports covariance_type of 'full'"
valid = ["manual", "GaussianMixture", "BayesianGaussianMixture"]
assert method in valid, f"Elliptical gating method should be one of {valid}"
self.conf = method_kwargs.get("conf", 0.95)
super().__init__(*args, **values)
def _manual(self) -> ShapelyPoly:
"""
Wrapper for manual elliptical gating. Searches method kwargs for centroid, width, height, and angle,
and returns polygon.
Returns
-------
Shapely.geometry.Polygon
Raises
------
AssertionError
If axis transformations do not match
TypeError
If centroid, width, height, or angle are of invalid type
ValueError
If centroid, width, height, or angle are missing from method kwargs
"""
centroid = self.method_kwargs.get("centroid", None)
width = self.method_kwargs.get("width", None)
height = self.method_kwargs.get("height", None)
angle = self.method_kwargs.get("angle", None)
if self.transform_x:
assert self.transform_x == self.transform_y, "Manual elliptical gate requires that x and y axis are " \
"transformed to the same scale"
kwargs = self.transform_x_kwargs or {}
centroid = apply_transform(pd.DataFrame({"c": list(centroid)}),
features=["c"],
method=self.transform_x,
**kwargs)["c"].values
df = apply_transform(pd.DataFrame({"w": [width], "h": [height], "a": [angle]}),
features=["w", "h", "a"],
method=self.transform_x,
**kwargs)
width, height, angle = df["w"].values[0], df["h"].values[0], df["a"].values[0]
if not all([x is not None for x in [centroid, width, height, angle]]):
raise ValueError("Manual elliptical gate requires the following keyword arguments; "
"width, height, angle and centroid")
if not len(centroid) == 2 and not all(isinstance(x, float) for x in centroid):
raise TypeError("Centroid should be a list of two float values")
if not all(isinstance(x, float) for x in [width, height, angle]):
raise TypeError("Width, height, and angle should be of type float")
return ellipse_to_polygon(centroid=centroid,
width=width,
height=height,
angle=angle)
def _fit(self,
data: pd.DataFrame) -> List[ShapelyPoly]:
"""
Internal method for fitting gate to the given data and returning geometric polygons for
captured populations.
Parameters
----------
data: Pandas.DataFrame
Returns
-------
list
List of Shapely polygon's
"""
params = {k: v for k, v in self.method_kwargs.items() if k != "conf"}
self.model = globals()[self.method](**params)
if not self.method_kwargs.get("probabilistic_ellipse", True):
return super()._fit(data=data)
self._xy_in_dataframe(data=data)
if self.sampling.get("method", None) is not None:
data = self._downsample(data=data)
self.model.fit_predict(data[[self.x, self.y]])
ellipses = [probablistic_ellipse(covar, conf=self.conf)
for covar in self.model.covariances_]
polygons = [ellipse_to_polygon(centroid, *ellipse)
for centroid, ellipse in zip(self.model.means_, ellipses)]
return polygons
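# Illustrative sketch (not part of the cytopy API): an EllipseGate backed by a
# Gaussian mixture model, drawing a 95% confidence ellipse around each
# component. Names and parameter values are assumptions; which component ends
# up labelled 'A' or 'B' depends on the fitted mixture.
def _example_ellipse_gate(training_data: pd.DataFrame,
                          new_data: pd.DataFrame) -> list:
    gate = EllipseGate(gate_name="t_cell_subsets",
                       parent="CD3+",
                       x="CD4",
                       y="CD8",
                       transform_x="logicle",
                       transform_y="logicle",
                       method="GaussianMixture",
                       method_kwargs={"n_components": 2,
                                      "random_state": 42,
                                      "conf": 0.95})
    gate.fit(data=training_data)
    gate.label_children({"A": "CD4_subset", "B": "CD8_subset"})
    return gate.fit_predict(data=new_data)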
class BooleanGate(PolygonGate):
"""
The BooleanGate is a special class of Gate that allows for merging, subtraction, and intersection methods.
A BooleanGate should be defined with one of the following string values as its 'method' and a set of
population names as 'populations' in method_kwargs:
* AND - generates a new population containing only events present in every population of a given
set of populations
* OR - generates a new population that is a merger of all unique events from all populations in a given
set of populations
* NOT - generates a new population that contains all events in some target population that are not
present in some set of other populations (taken as the first member of 'populations')
    BooleanGate inherits from the PolygonGate and generates a Population with Polygon geometry. This
    allows the user to view the resulting 'gate' as a polygon structure, meaning the resulting
    population can be plotted and handled much like any other polygon-gated population.
    """
populations = mongoengine.ListField(required=True)
def __init__(self,
method: str,
populations: list,
*args,
**kwargs):
if method not in ["AND", "OR", "NOT"]:
raise ValueError("method must be one of: 'OR', 'AND' or 'NOT'")
super().__init__(*args, method=method, populations=populations, **kwargs)
def _or(self, data: List[pd.DataFrame]) -> pd.DataFrame:
"""
OR operation, generates index of events that is a merger of all unique events from all populations in a given
set of populations.
Parameters
----------
data: list
List of Pandas DataFrames
Returns
-------
Pandas.DataFrame
New population dataframe
"""
idx = np.unique(np.concatenate([df.index.values for df in data], axis=0), axis=0)
return pd.concat(data).drop_duplicates().loc[idx].copy()
def _and(self, data: List[pd.DataFrame]) -> pd.DataFrame:
"""
AND operation, generates index of events that are present in every population of a given
set of populations
Parameters
----------
data: list
List of Pandas DataFrames
Returns
-------
Pandas.DataFrame
New population dataframe
"""
idx = reduce(np.intersect1d, [df.index.values for df in data])
return pd.concat(data).drop_duplicates().loc[idx].copy()
def _not(self,
data: List[pd.DataFrame]) -> pd.DataFrame:
"""
NOT operation, generates index of events that contains all events in some target population that are not
present in some set of other populations
Parameters
----------
data: list
List of Pandas DataFrames
Returns
-------
Pandas.DataFrame
New population dataframe
"""
target = data[0]
subtraction_index = np.unique(np.concatenate([df.index.values for df in data[1:]], axis=0), axis=0)
idx = np.setdiff1d(target.index.values, subtraction_index)
        return pd.concat(data).drop_duplicates().loc[idx].copy()
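# Illustrative sketch (not part of the cytopy API): constructing a BooleanGate
# that intersects two previously gated populations. The gate, parent, channel
# and population names are assumptions for demonstration only.
def _example_boolean_gate() -> BooleanGate:
    return BooleanGate(gate_name="cd4_cd8_double_positive",
                       parent="CD3+",
                       x="CD4",
                       y="CD8",
                       method="AND",
                       populations=["CD4_subset", "CD8_subset"])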
import time
import datetime
import numpy as np
import pandas as pd
import lightgbm as lgb
from dateutil.parser import parse
from sklearn.cross_validation import KFold
from sklearn.metrics import mean_squared_error
data_path = './'
train = pd.read_csv(data_path + 'f_train_20180204.csv', encoding='gb2312')
test = pd.read_csv(data_path + 'f_test_a_20180204.csv', encoding='gb2312')
########################################################################
#
# Functions for loading financial data.
#
# Data from several different files are combined into a single
# Pandas DataFrame for each stock or stock-index.
#
# The price-data is read from CSV-files from Yahoo Finance.
# Other financial data (Sales Per Share, Book-Value Per Share, etc.)
# is read from tab-separated text-files with date-format MM/DD/YYYY.
#
########################################################################
#
# This file is part of FinanceOps:
#
# https://github.com/Hvass-Labs/FinanceOps
#
# Published under the MIT License. See the file LICENSE for details.
#
# Copyright 2018 by <NAME>
#
########################################################################
import pandas as pd
import numpy as np
import os
from data_keys import *
from returns import total_return
########################################################################
# Data-directory. Set this before calling any of the load-functions.
data_dir = "data/"
########################################################################
# Private helper-functions.
def _resample_daily(data):
"""
Resample data using linear interpolation.
:param data: Pandas DataFrame or Series.
:return: Resampled daily data.
"""
return data.resample('D').interpolate(method='linear')
def _load_data(path):
"""
    Load a CSV-file with tab-separation, where the date-index is in the first
    column and uses the MM/DD/YYYY date-format.
This is a simple wrapper for Pandas.read_csv().
:param path: Path for the data-file.
:return: Pandas DataFrame.
"""
data = pd.read_csv(path,
sep="\t",
index_col=0,
parse_dates=True,
dayfirst=False)
return data
def _load_price_yahoo(ticker):
"""
Load share-price data from a Yahoo CSV-file.
Only retrieve the 'Close' and 'Adj Close' prices
which are interpolated to daily values.
The 'Close' price-data is adjusted for stock-splits.
The 'Adj Close' price-data is adjusted for both
stock-splits and dividends, so it corresponds to
the Total Return.
https://help.yahoo.com/kb/SLN2311.html
:param ticker: Ticker-name for the data to load.
:return: Pandas DataFrame with SHARE_PRICE and TOTAL_RETURN
"""
# Path for the data-file to load.
path = os.path.join(data_dir, ticker + " Share-Price (Yahoo).csv")
# Read share-prices from file.
price_raw = pd.read_csv(path,
index_col=0,
header=0,
sep=',',
parse_dates=[0],
dayfirst=False)
# Rename columns.
columns = \
{
'Adj Close': TOTAL_RETURN,
'Close': SHARE_PRICE
}
price = price_raw.rename(columns=columns)
# Select the columns we need.
price = price[[TOTAL_RETURN, SHARE_PRICE]]
# Interpolate to get prices for all days.
price_daily = _resample_daily(price)
return price_daily
########################################################################
# Public functions.
def load_usa_cpi():
"""
Load the U.S. Consumer Price Index (CPI) which measures inflation.
The data is interpolated to get daily values.
http://www.bls.gov/cpi/data.htm
:return: Pandas DataFrame.
"""
# Path for the data-file to load.
path = os.path.join(data_dir, "USA CPI.csv")
# Load the data.
    data = pd.read_csv(path, sep=",", parse_dates=[3], index_col=3)
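# Illustrative usage (a sketch, not from the source): assumes the expected CSV files exist under
# `data_dir` with the naming conventions used above; the ticker "SPY" is a made-up example.
#
#     data_dir = "data/"
#     price_daily = _load_price_yahoo("SPY")   # daily SHARE_PRICE and TOTAL_RETURN columns
#     cpi = load_usa_cpi()                     # U.S. CPI table read from "USA CPI.csv"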
from itertools import groupby, zip_longest
from fractions import Fraction
from random import sample
import json
import pandas as pd
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song has no meter
class UnknownPGramType(Exception):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return f"Unknown pgram type: {self.arg}."
#compute features:
def compute_completesmeasure_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
def compute_completesmeasure_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
#extract IOI in units of beat
#IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note
#for last note: beatfraction is taken
#Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody)
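#Illustrative example (not from the source): beatinsong ['0', '1', '3/2', '2'] with a final
#beatfraction of '1/2' yields IOI_beatfraction ['1', '1/2', '1/2', '1/2'].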
#
#extract beats per measure
def extractFeatures(seq_iter, vocalfeatures=True):
count = 0
for seq in seq_iter:
count += 1
if count % 100 == 0:
print(count, end=' ')
pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests
IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs]
IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]]
seq['features']['IOI_beatfraction'] = IOI_beatfraction
beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']]
seq['features']['beatspermeasure'] = beatspermeasure
phrasepos = seq['features']['phrasepos']
phrasestart_ix=[0]*len(phrasepos)
for ix in range(1,len(phrasestart_ix)):
if phrasepos[ix] < phrasepos[ix-1]:
phrasestart_ix[ix] = ix
else:
phrasestart_ix[ix] = phrasestart_ix[ix-1]
seq['features']['phrasestart_ix'] = phrasestart_ix
endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True]
seq['features']['endOfPhrase'] = endOfPhrase
cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(len(phrasepos))]
cb_s = [compute_completesbeat_song(seq, ix) for ix in range(len(phrasepos))]
seq['features']['completesmeasure_phrase'] = cm_p
seq['features']['completesbeat_phrase'] = cb_p
seq['features']['completesmeasure_song'] = cm_s
seq['features']['completesbeat_song'] = cb_s
if vocalfeatures:
#move lyric features to end of melisma:
#rhymes, rhymescontentwords, wordstress, noncontentword, wordend
#and compute rhyme_noteoffset and rhyme_beatoffset
if 'melismastate' in seq['features'].keys(): #vocal?
lyrics = seq['features']['lyrics']
phoneme = seq['features']['phoneme']
melismastate = seq['features']['melismastate']
rhymes = seq['features']['rhymes']
rhymescontentwords = seq['features']['rhymescontentwords']
wordend = seq['features']['wordend']
noncontentword = seq['features']['noncontentword']
wordstress = seq['features']['wordstress']
rhymes_endmelisma, rhymescontentwords_endmelisma = [], []
wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], []
lyrics_endmelisma, phoneme_endmelisma = [], []
from_ix = 0
inmelisma = False
for ix in range(len(phrasepos)):
if melismastate[ix] == 'start':
from_ix = ix
inmelisma = True
if melismastate[ix] == 'end':
if not inmelisma:
from_ix = ix
inmelisma = False
rhymes_endmelisma.append(rhymes[from_ix])
rhymescontentwords_endmelisma.append(rhymescontentwords[from_ix])
wordend_endmelisma.append(wordend[from_ix])
noncontentword_endmelisma.append(noncontentword[from_ix])
wordstress_endmelisma.append(wordstress[from_ix])
lyrics_endmelisma.append(lyrics[from_ix])
phoneme_endmelisma.append(phoneme[from_ix])
else:
rhymes_endmelisma.append(False)
rhymescontentwords_endmelisma.append(False)
wordend_endmelisma.append(False)
noncontentword_endmelisma.append(False)
wordstress_endmelisma.append(False)
lyrics_endmelisma.append(None)
phoneme_endmelisma.append(None)
seq['features']['rhymes_endmelisma'] = rhymes_endmelisma
seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma
seq['features']['wordend_endmelisma'] = wordend_endmelisma
seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma
seq['features']['wordstress_endmelisma'] = wordstress_endmelisma
seq['features']['lyrics_endmelisma'] = lyrics_endmelisma
seq['features']['phoneme_endmelisma'] = phoneme_endmelisma
#compute rhyme_noteoffset and rhyme_beatoffset
rhyme_noteoffset = [0]
rhyme_beatoffset = [0.0]
previous = 0
previousbeat = float(Fraction(seq['features']['beatinsong'][0]))
for ix in range(1,len(rhymescontentwords_endmelisma)):
if rhymescontentwords_endmelisma[ix-1]: #previous rhymes
previous = ix
previousbeat = float(Fraction(seq['features']['beatinsong'][ix]))
rhyme_noteoffset.append(ix - previous)
rhyme_beatoffset.append(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat)
seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset
seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset
else:
#vocal features requested, but not present.
#skip melody
continue
#Or do this?
if False:
length = len(phrasepos)
seq['features']['rhymes_endmelisma'] = [None] * length
seq['features']['rhymescontentwords_endmelisma'] = [None] * length
seq['features']['wordend_endmelisma'] = [None] * length
seq['features']['noncontentword_endmelisma'] = [None] * length
seq['features']['wordstress_endmelisma'] = [None] * length
seq['features']['lyrics_endmelisma'] = [None] * length
seq['features']['phoneme_endmelisma'] = [None] * length
yield seq
class NoFeaturesError(Exception):
def __init__(self, arg):
self.args = arg
class NoTrigramsError(Exception):
def __init__(self, arg):
self.args = arg
def __str__(self):
return repr(self.value)
#endix is index of last note + 1
def computeSumFractions(fractions, startix, endix):
res = 0.0
for fr in fractions[startix:endix]:
res = res + float(Fraction(fr))
return res
#make groups of indices with the same successive pitch, but (optionally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be allowed (contourfourth)
#returns tuples (ix of first note in group, ix of last note in group + 1)
#crossPhraseBreak=False splits on phrase break. N.B. Is Using GroundTruth!
def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False):
res = []
if crossPhraseBreak:
for _, g in groupby( enumerate(midipitch), key=lambda x:x[1]):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
else: #N.B. This uses the ground truth
for _, g in groupby( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
return res
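#Illustrative example (not from the source): with midipitch = [60, 60, 62, 62, 62, 64] and
#phrase_ix = [0, 0, 0, 0, 1, 1], crossPhraseBreak=True gives [(0, 2), (2, 5), (5, 6)], while
#crossPhraseBreak=False splits the run of 62s at the phrase boundary: [(0, 2), (2, 4), (4, 5), (5, 6)].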
#True if no phrase end at first or second item (span) in the trigram
#trigram looks like ((8, 10), (10, 11), (11, 12))
def noPhraseBreak(tr, endOfPhrase):
return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \
( True in endOfPhrase[tr[1][0]:tr[1][1]] ) )
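#Illustrative example (not from the source): with endOfPhrase = [False]*12 + [True], the trigram
#((8, 10), (10, 11), (11, 12)) passes noPhraseBreak (no phrase end inside its first two spans),
#whereas ((10, 11), (11, 13), (13, 14)) does not, because endOfPhrase[11:13] contains True.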
#pgram_type : "pitch", "note"
def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None):
pgrams = {}
arfftype = {}
for ix, seq in enumerate(corpus):
if endat is not None:
if ix >= endat:
continue
if ix < startat:
continue
if not ix%100:
print(ix, end=' ')
songid = seq['id']
try:
pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x)))
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float)
if 'melismastate' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int)
if 'informationcontent' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informationcontent', typeconv=float)
except NoFeaturesError:
print(songid, ": No features extracted.")
except NoTrigramsError:
print(songid, ": No trigrams extracted")
#if ix > startat:
# if arfftype.keys() != arfftype_new.keys():
# print("Warning: Melodies have different feature sets.")
# print(list(zip_longest(arfftype.keys(), arfftype_new.keys())))
#Keep largest set of features possible. N.B. no guarantee that all features in arfftype are in each sequence.
arfftype.update(arfftype_new)
#concat melodies
pgrams = pd.concat([v for v in pgrams.values()])
return pgrams, arfftype
def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False):
# some aliases
scaledegree = seq['features']['scaledegree']
endOfPhrase = seq['features']['endOfPhrase']
midipitch = seq['features']['midipitch']
phrase_ix = seq['features']['phrase_ix']
if pgram_type == "pitch":
event_spans = breakpitchlist(midipitch, phrase_ix) #allow pitches to cross phrase break
elif pgram_type == "note":
event_spans = list(zip(range(len(scaledegree)),range(1,len(scaledegree)+1)))
else:
raise UnknownPGramType(pgram_type)
    # make pgrams (5-tuples) of spans
event_spans = event_spans + [(None, None), (None, None)]
pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:]))
    # If skipPhraseCrossing, prune pgrams crossing phrase boundaries. WHY?
    #Why actually? e.g. kindr154 phrases of 2 pitches
if skipPhraseCrossing:
pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)]
if len(pgram_span_ixs) == 0:
raise NoTrigramsError(seq['id'])
# create dataframe with pgram names as index
pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs]
pgrams = pd.DataFrame(index=pgram_ids)
pgrams['ix0_0'] = pd.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix0_1'] = pd.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_0'] = pd.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_1'] = pd.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_0'] = pd.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_1'] = pd.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_0'] = pd.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_1'] = pd.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_0'] = pd.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_1'] = pd.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16")
#add tune family ids and songids
pgrams['tunefamily'] = seq['tunefamily']
pgrams['songid'] = seq['id']
pgrams, arfftype = extractPgramFeatures(pgrams, seq)
return pgrams, arfftype
def getBeatDuration(timesig):
try:
dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength)
except TimeSignatureException:
dur = float(Fraction(timesig) / Fraction('1/4'))
return dur
def oneCrossRelation(el1, el2, typeconv):
if pd.isna(el1) or pd.isna(el2):
return np.nan
return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+'
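#Illustrative examples (not from the source): oneCrossRelation(4, 7, int) -> '+',
#oneCrossRelation(7, 7, int) -> '=', oneCrossRelation(7, 4, int) -> '-';
#a missing value in either argument yields np.nan.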
def addCrossRelations(pgrams, arfftype, featurename, newname=None, typeconv=int):
postfixes = {
1 : 'first',
2 : 'second',
3 : 'third',
4 : 'fourth',
5 : 'fifth'
}
if newname is None:
newname = featurename
for ix1 in range(1,6):
for ix2 in range(ix1+1,6):
featname = newname + postfixes[ix1] + postfixes[ix2]
source = zip(pgrams[featurename + postfixes[ix1]], pgrams[featurename + postfixes[ix2]])
pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source]
arfftype[featname] = '{-,=,+}'
return pgrams, arfftype
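#Illustrative example (not from the source): addCrossRelations(pgrams, arfftype, 'diatonicpitch', typeconv=int)
#adds the ten columns 'diatonicpitchfirstsecond' ... 'diatonicpitchfourthfifth', each holding '-', '=' or '+'
#according to how the later of the two compared values relates to the earlier one.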
def extractPgramFeatures(pgrams, seq):
# vocal?
vocal = False
if 'melismastate' in seq['features'].keys():
vocal = True
arfftype = {}
# some aliases
scaledegree = seq['features']['scaledegree']
beatstrength = seq['features']['beatstrength']
diatonicpitch = seq['features']['diatonicpitch']
midipitch = seq['features']['midipitch']
chromaticinterval = seq['features']['chromaticinterval']
timesig = seq['features']['timesignature']
metriccontour = seq['features']['metriccontour']
beatinsong = seq['features']['beatinsong']
beatinphrase = seq['features']['beatinphrase']
endOfPhrase = seq['features']['endOfPhrase']
phrasestart_ix = seq['features']['phrasestart_ix']
phrase_ix = seq['features']['phrase_ix']
completesmeasure_song = seq['features']['completesmeasure_song']
completesbeat_song = seq['features']['completesbeat_song']
completesmeasure_phrase = seq['features']['completesmeasure_phrase']
completesbeat_phrase = seq['features']['completesbeat_phrase']
IOIbeatfraction = seq['features']['IOI_beatfraction']
nextisrest = seq['features']['nextisrest']
gpr2a = seq['features']['gpr2a_Frankland']
gpr2b = seq['features']['gpr2b_Frankland']
gpr3a = seq['features']['gpr3a_Frankland']
gpr3d = seq['features']['gpr3d_Frankland']
gprsum = seq['features']['gpr_Frankland_sum']
pprox = seq['features']['pitchproximity']
prev = seq['features']['pitchreversal']
lbdmpitch = seq['features']['lbdm_spitch']
lbdmioi = seq['features']['lbdm_sioi']
lbdmrest = seq['features']['lbdm_srest']
lbdm = seq['features']['lbdm_boundarystrength']
if vocal:
wordstress = seq['features']['wordstress_endmelisma']
noncontentword = seq['features']['noncontentword_endmelisma']
wordend = seq['features']['wordend_endmelisma']
rhymescontentwords = seq['features']['rhymescontentwords_endmelisma']
rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset']
rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset']
melismastate = seq['features']['melismastate']
phrase_count = max(phrase_ix) + 1
pgrams['scaledegreefirst'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['scaledegreesecond'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['scaledegreethird'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['scaledegreefourth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['scaledegreefifth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['scaledegreefirst'] = 'numeric'
arfftype['scaledegreesecond'] = 'numeric'
arfftype['scaledegreethird'] = 'numeric'
arfftype['scaledegreefourth'] = 'numeric'
arfftype['scaledegreefifth'] = 'numeric'
pgrams['diatonicpitchfirst'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['diatonicpitchsecond'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['diatonicpitchthird'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['diatonicpitchfourth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['diatonicpitchfifth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['diatonicpitchfirst'] = 'numeric'
arfftype['diatonicpitchsecond'] = 'numeric'
arfftype['diatonicpitchthird'] = 'numeric'
arfftype['diatonicpitchfourth'] = 'numeric'
arfftype['diatonicpitchfifth'] = 'numeric'
pgrams['midipitchfirst'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['midipitchsecond'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['midipitchthird'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['midipitchfourth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['midipitchfifth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['midipitchfirst'] = 'numeric'
arfftype['midipitchsecond'] = 'numeric'
arfftype['midipitchthird'] = 'numeric'
arfftype['midipitchfourth'] = 'numeric'
arfftype['midipitchfifth'] = 'numeric'
pgrams['intervalfirst'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['intervalsecond'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['intervalthird'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['intervalfourth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['intervalfifth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['intervalfirst'] = 'numeric'
arfftype['intervalsecond'] = 'numeric'
arfftype['intervalthird'] = 'numeric'
arfftype['intervalfourth'] = 'numeric'
arfftype['intervalfifth'] = 'numeric'
parsons = {-1:'-', 0:'=', 1:'+'}
#intervalcontour is not a good feature. Pitchcontour would be better. This will be in the cross-relations
#pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int1) else np.nan for int1, int2 in \
# zip(pgrams['intervalfirst'],pgrams['intervalsecond'])]
#pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \
# zip(pgrams['intervalsecond'],pgrams['intervalthird'])]
#pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalthird'],pgrams['intervalfourth'])]
#pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalfourth'],pgrams['intervalfifth'])]
#arfftype['intervalcontoursecond'] = '{-,=,+}'
#arfftype['intervalcontourthird'] = '{-,=,+}'
#arfftype['intervalcontourfourth'] = '{-,=,+}'
#arfftype['intervalcontourfifth'] = '{-,=,+}'
#intervals of which second tone has center of gravity according to Vos 2002 + octave equivalents
VosCenterGravityASC = np.array([1, 5, 8])
VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11])
VosCenterGravity = list(VosCenterGravityDESC-24) + \
list(VosCenterGravityDESC-12) + \
list(VosCenterGravityDESC) + \
list(VosCenterGravityASC) + \
list(VosCenterGravityASC+12) + \
list(VosCenterGravityASC+24)
pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfirst']]
pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']]
pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']]
pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfourth']]
pgrams['VosCenterGravityfifth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfifth']]
arfftype['VosCenterGravityfirst'] = '{True, False}'
arfftype['VosCenterGravitysecond'] = '{True, False}'
arfftype['VosCenterGravitythird'] = '{True, False}'
arfftype['VosCenterGravityfourth'] = '{True, False}'
arfftype['VosCenterGravityfifth'] = '{True, False}'
VosHarmony = {
0: 0,
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 1,
7: 6,
8: 5,
9: 4,
10: 3,
11: 2,
12: 7
}
#interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633)
def vosint(intervals):
return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not pd.isna(i) else np.nan for i in intervals]
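    #Illustrative example (not from the source): vosint([0, 7, -13, 14]) -> [0, 7, 1, 2]; the
    #octave-reduced size lies in 1..12 (0 only for a true unison) and is then mapped through the
    #VosHarmony table above.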
pgrams['VosHarmonyfirst'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16")
pgrams['VosHarmonysecond'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16")
pgrams['VosHarmonythird'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16")
pgrams['VosHarmonyfourth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16")
pgrams['VosHarmonyfifth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16")
arfftype['VosHarmonyfirst'] = 'numeric'
arfftype['VosHarmonysecond'] = 'numeric'
arfftype['VosHarmonythird'] = 'numeric'
arfftype['VosHarmonyfourth'] = 'numeric'
arfftype['VosHarmonyfifth'] = 'numeric'
if 'informationcontent' in seq['features'].keys():
informationcontent = seq['features']['informationcontent']
pgrams['informationcontentfirst'] = [informationcontent[int(ix)] for ix in pgrams['ix0_0']]
pgrams['informationcontentsecond'] = [informationcontent[int(ix)] for ix in pgrams['ix1_0']]
pgrams['informationcontentthird'] = [informationcontent[int(ix)] for ix in pgrams['ix2_0']]
pgrams['informationcontentfourth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['informationcontentfifth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['informationcontentfirst'] = 'numeric'
arfftype['informationcontentsecond'] = 'numeric'
arfftype['informationcontentthird'] = 'numeric'
arfftype['informationcontentfourth'] = 'numeric'
arfftype['informationcontentfifth'] = 'numeric'
pgrams['contourfirst'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfirst']]
pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']]
pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']]
pgrams['contourfourth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfourth']]
pgrams['contourfifth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfifth']]
arfftype['contourfirst'] = '{-,=,+}'
arfftype['contoursecond'] = '{-,=,+}'
arfftype['contourthird'] = '{-,=,+}'
arfftype['contourfourth'] = '{-,=,+}'
arfftype['contourfifth'] = '{-,=,+}'
###########################################3
#derived features from Interval and Contour
pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \
zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['registraldirectionchange'] = '{True, False}'
pgrams['largetosmall'] = [int_first >= 6 and int_second <=4 for int_first, int_second in \
zip(pgrams['intervalsecond'], pgrams['intervalthird'])]
arfftype['largetosmall'] = '{True, False}'
pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \
for i in zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['contourreversal'] = '{True, False}'
pgrams['isascending'] = \
(pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird'])
arfftype['isascending'] = '{True, False}'
pgrams['isdescending'] = \
(pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird'])
arfftype['isdescending'] = '{True, False}'
diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values
pgrams['ambitus'] = diat.max(1) - diat.min(1)
arfftype['ambitus'] = 'numeric'
pgrams['containsleap'] = \
(abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \
(abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1)
arfftype['containsleap'] = '{True, False}'
###########################################3
pgrams['numberofnotesfirst'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16")
pgrams['numberofnotessecond'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16")
pgrams['numberofnotesthird'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16")
pgrams['numberofnotesfourth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16")
pgrams['numberofnotesfifth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16")
arfftype['numberofnotesfirst'] = 'numeric'
arfftype['numberofnotessecond'] = 'numeric'
arfftype['numberofnotesthird'] = 'numeric'
arfftype['numberofnotesfourth'] = 'numeric'
arfftype['numberofnotesfifth'] = 'numeric'
if seq['freemeter']:
pgrams['meternumerator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
else:
pgrams['meternumerator'] = pd.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['meternumerator'] = 'numeric'
arfftype['meterdenominator'] = 'numeric'
pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['nextisrestfirst'] = '{True, False}'
arfftype['nextisrestsecond'] = '{True, False}'
arfftype['nextisrestthird'] = '{True, False}'
arfftype['nextisrestfourth'] = '{True, False}'
arfftype['nextisrestfifth'] = '{True, False}'
pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']]
pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']]
pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']]
pgrams['beatstrengthfourth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
    pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
import numpy as np
import pandas as pd
import tensorflow as tf
from shfl.data_base.data_base import DataBase
from shfl.data_distribution.data_distribution_iid import IidDataDistribution
class TestDataBase(DataBase):
def __init__(self):
super(TestDataBase, self).__init__()
def load_data(self):
self._train_data = np.random.rand(200).reshape([40, 5])
self._test_data = np.random.rand(200).reshape([40, 5])
self._train_labels = tf.keras.utils.to_categorical(np.random.randint(0, 10, 40))
self._test_labels = tf.keras.utils.to_categorical(np.random.randint(0, 10, 40))
class TestDataBasePandas(DataBase):
def __init__(self):
super(TestDataBasePandas, self).__init__()
def load_data(self):
self._train_data = pd.DataFrame(np.random.rand(200).reshape([40, 5]))
self._test_data = pd.DataFrame(np.random.rand(200).reshape([40, 5]))
self._train_labels = pd.DataFrame(tf.keras.utils.to_categorical(np.random.randint(0, 10, 40)))
self._test_labels = pd.DataFrame(tf.keras.utils.to_categorical(np.random.randint(0, 10, 40)))
def test_make_data_federated():
data = TestDataBase()
data.load_data()
data_distribution = IidDataDistribution(data)
train_data, train_label = data_distribution._database.train
num_nodes = 3
percent = 60
# weights = np.full(num_nodes, 1/num_nodes)
weights = [0.5, 0.25, 0.25]
federated_data, federated_label = data_distribution.make_data_federated(train_data,
train_label,
percent,
num_nodes,
weights)
data_distribution.get_federated_data(3)
all_data = np.concatenate(federated_data)
all_label = np.concatenate(federated_label)
idx = []
for data in all_data:
idx.append(np.where((data == train_data).all(axis=1))[0][0])
for i, weight in enumerate(weights):
assert federated_data[i].shape[0] == int(weight * int(percent * train_data.shape[0] / 100))
assert all_data.shape[0] == int(percent * train_data.shape[0] / 100)
assert num_nodes == len(federated_data) == len(federated_label)
assert (np.sort(all_data.ravel()) == np.sort(train_data[idx,].ravel())).all()
assert (np.sort(all_label, 0) == np.sort(train_label[idx], 0)).all()
#test make federated data with replacement
federated_data, federated_label = data_distribution.make_data_federated(train_data,
train_label,
percent,
num_nodes,
weights,
sampling="with_replacement")
all_data = np.concatenate(federated_data)
all_label = np.concatenate(federated_label)
idx = []
for data in all_data:
idx.append(np.where((data == train_data).all(axis=1))[0][0])
for i, weight in enumerate(weights):
assert federated_data[i].shape[0] == int(weight * int(percent * train_data.shape[0] / 100))
assert all_data.shape[0] == int(percent * train_data.shape[0] / 100)
assert num_nodes == len(federated_data) == len(federated_label)
assert (np.sort(all_data.ravel()) == np.sort(train_data[idx,].ravel())).all()
assert (np.sort(all_label, 0) == np.sort(train_label[idx], 0)).all()
def test_make_data_federated_pandas():
data = TestDataBasePandas()
data.load_data()
data_distribution = IidDataDistribution(data)
train_data, train_label = data_distribution._database.train
num_nodes = 3
percent = 60
# weights = np.full(num_nodes, 1/num_nodes)
weights = [0.5, 0.25, 0.25]
federated_data, federated_label = data_distribution.make_data_federated(train_data,
train_label,
percent,
num_nodes,
weights)
data_distribution.get_federated_data(3)
all_data = pd.concat(federated_data)
all_label = pd.concat(federated_label)
for i, weight in enumerate(weights):
assert federated_data[i].shape[0] == int(weight * int(percent * train_data.shape[0] / 100))
assert all_data.shape[0] == int(percent * train_data.shape[0] / 100)
assert num_nodes == len(federated_data) == len(federated_label)
pd.testing.assert_frame_equal(all_data, train_data.iloc[all_data.index.values])
pd.testing.assert_frame_equal(all_label, train_label.iloc[all_data.index.values])
#test make federated data with replacement
federated_data, federated_label = data_distribution.make_data_federated(train_data,
train_label,
percent,
num_nodes,
weights,
sampling="with_replacement")
all_data = pd.concat(federated_data)
all_label = pd.concat(federated_label)
for i, weight in enumerate(weights):
assert federated_data[i].shape[0] == int(weight * int(percent * train_data.shape[0] / 100))
assert all_data.shape[0] == int(percent * train_data.shape[0] / 100)
assert num_nodes == len(federated_data) == len(federated_label)
    pd.testing.assert_frame_equal(all_data, train_data.iloc[all_data.index.values])
from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def test_concat_with_numerical_col_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
np.array([4, 10, 18], long)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
np.array([4*7, 80, 18*9], long)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
# Multiple Columns
def test_rolling_sum_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_sum(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])
def test_rolling_mean_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_mean(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)
def test_rolling_median_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_median(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])
def test_rolling_min_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_min(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])
def test_rolling_max_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_max(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])
def test_rolling_std_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_std(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)
def test_rolling_var_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_var(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)
def test_method_chaining(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.\
engineer('concat(c_1, c_2)').\
engineer('concat(c_1, n_2)').\
engineer('mult(n_2, n_3)').\
engineer('lg(n_2)').\
engineer('pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_call_semi_col_sep(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_with_arr_arg(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_long_method_chains(self):
df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
    df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
"Test suite of AirBnbModel.source.processing module"
import numpy as np
import pandas as pd
import pytest
from pandas._testing import assert_index_equal
from AirBnbModel.source.processing import intersect_index
class TestIntersectIndex(object):
"Test suite for intersect_index method"
def test_first_input_not_pandas_dataframe_or_series(self):
"First input passed as a list. Should return AssertionError"
input1 = [1, 2, 3, 4]
input2 = pd.Series(data=[5, 6, 7, 8], index=["foo", "bar", "bar", "qux"])
with pytest.raises(AssertionError) as e:
intersect_index(input1, input2)
assert e.match("input1 is not either a pandas DataFrame or Series")
def test_second_input_not_pandas_dataframe_or_series(self):
"Second input passed as a list. Should return AssertionError"
input1 = pd.Series(data=[5, 6, 7, 8], index=["foo", "bar", "bar", "qux"])
input2 = [1, 2, 3, 4]
with pytest.raises(AssertionError) as e:
intersect_index(input1, input2)
assert e.match("input2 is not either a pandas DataFrame or Series")
def test_index_as_string(self):
"Index of both inputs are string (object) dtypes."
input1 = pd.Series(data=[1, 2, 3], index=["foo", "bar", "bar"])
input2 = pd.Series(data=[4, 5, 6], index=["bar", "foo", "qux"])
expected = pd.Index(["foo", "bar"])
actual = intersect_index(input1, input2)
assert_index_equal(actual, expected), f"{expected} expected. Got {actual}"
def test_index_as_number(self):
"Index of both inputs are int dtypes."
input1 = pd.Series(data=[1, 2, 3], index=[1, 2, 3])
input2 = pd.Series(data=[4, 5, 6], index=[1, 1, 4])
expected = pd.Index([1])
actual = intersect_index(input1, input2)
assert_index_equal(actual, expected), f"{expected} expected. Got {actual}"
def test_null_intersection_between_inputs(self):
"There is not intersection between. Should return an empty pd.Index()"
input1 = pd.Series(data=[1, 2, 3], index=[1, 2, 3])
input2 = pd.Series(data=[4, 5, 6], index=[4, 5, 6])
expected = pd.Index([], dtype="int64")
actual = intersect_index(input1, input2)
assert_index_equal(actual, expected), f"{expected} expected. Got {actual}"
def test_dropna_true(self):
"Intersection contains NaN values. dropna=True should remove it"
        input1 = pd.Series(data=[1, 2, 3, 4], index=["foo", "bar", "bar", np.nan])
"""Run unit tests.
Use this to run tests and understand how tasks.py works.
Example:
Create directories::
mkdir -p test-data/input
mkdir -p test-data/output
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
* test_combine_() test the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import ntpath
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class TestLogPusher(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = TestLogPusher('combiner')
# sample data
def create_files_df_clean():
# create sample data
df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
return df1, df2, df3
def create_files_df_clean_combine():
df1,df2,df3 = create_files_df_clean()
df_all = pd.concat([df1,df2,df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_clean_combine_with_filename(fname_list):
df1, df2, df3 = create_files_df_clean()
df1['filename'] = os.path.basename(fname_list[0])
df2['filename'] = os.path.basename(fname_list[1])
df3['filename'] = os.path.basename(fname_list[2])
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
if cfg_col_common:
        df_all = pd.concat([df1, df2, df3], join='inner')
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import period_range, PeriodIndex, Index, date_range
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(
3, 2, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'],
                                freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_union_misc(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
tm.assert_index_equal(result, index)
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.union(index2)
msg = 'can only call with other PeriodIndex-ed objects'
with tm.assertRaisesRegexp(ValueError, msg):
index.join(index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with tm.assertRaises(period.IncompatibleFrequency):
index.join(index3)
def test_union_dataframe_index(self):
rng1 = pd.period_range('1/1/1999', '1/1/2012', freq='M')
s1 = pd.Series(np.random.randn(len(rng1)), rng1)
rng2 = pd.period_range('1/1/1980', '12/1/2001', freq='M')
s2 = pd.Series(np.random.randn(len(rng2)), rng2)
df = pd.DataFrame({'s1': s1, 's2': s2})
exp = pd.period_range('1/1/1980', '1/1/2012', freq='M')
self.assert_index_equal(df.index, exp)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
tm.assert_index_equal(result, index[10:-5])
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
tm.assert_index_equal(result, index[10:-5])
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.intersection(index2)
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with tm.assertRaises(period.IncompatibleFrequency):
index.intersection(index3)
def test_intersection_cases(self):
base = period_range('6/1/2000', '6/30/2000', freq='D', name='idx')
# if target has the same name, it is preserved
rng2 = period_range('5/15/2000', '6/20/2000', freq='D', name='idx')
expected2 = period_range('6/1/2000', '6/20/2000', freq='D',
name='idx')
# if target name is different, it will be reset
rng3 = period_range('5/15/2000', '6/20/2000', freq='D', name='other')
expected3 = period_range('6/1/2000', '6/20/2000', freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], name='idx', freq='D')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
# non-monotonic
base = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
rng2 = PeriodIndex(['2011-01-04', '2011-01-02',
'2011-02-02', '2011-02-03'],
freq='D', name='idx')
expected2 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name='idx')
rng3 = PeriodIndex(['2011-01-04', '2011-01-02', '2011-02-02',
'2011-02-03'],
freq='D', name='other')
expected3 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], freq='D', name='idx')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, 'D')
# empty same freq
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
        expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
import logging
import pandas as pd
import time
import wikifier
import typing
import hashlib
import json
import os
from .parser_base import ParserBase, PreParsedResult
from pandas.util import hash_pandas_object
from datamart_isi.materializers.general_materializer import GeneralMaterializer
from datamart_isi.materializers.wikitables_materializer import WikitablesMaterializer
from datamart_isi.utilities.d3m_wikifier import save_wikifier_choice, check_is_q_node_column
from datamart_isi.cache.general_search_cache import GeneralSearchCache
from datamart_isi.utilities.utils import Utils as datamart_utils
from datamart_isi import config as config_datamart
from etk.wikidata.entity import *
from etk.wikidata.value import *
from io import StringIO
from datetime import datetime
from datetime import timezone
from datetime import timedelta
from wikifier.utils import remove_punctuation
from datamart_isi.utilities.timeout import timeout_call
SUPPORT_TYPE = ["csv"]
MODULE_NAME = "CSVParser"
class CSVParser(ParserBase):
def __init__(self):
self._logger = logging.getLogger(__name__)
self.cache_manager = GeneralSearchCache()
def load_and_preprocess(self, **kwargs):
input_dir = kwargs.get("input_dir")
file_type = kwargs.get("file_type", "csv")
job = kwargs.get("job", None)
wikifier_choice = kwargs.get("wikifier_choice", "auto")
start = time.time()
self._logger.debug("Start loading from " + input_dir)
if job is not None:
job.meta['step'] = "materializing the dataset..."
job.save_meta()
from_online_file = False
if file_type == "csv":
try:
_ = [pd.read_csv(input_dir, dtype=str)]
file_type = "online_csv"
except Exception:
raise ValueError("Reading csv from" + input_dir + "failed.")
if len(file_type) > 7 and file_type[:7] == "online_":
from_online_file = True
general_materializer = GeneralMaterializer()
file_type = file_type[7:]
# example: "csv"
file_metadata = {
"materialization": {
"arguments": {
"url": input_dir,
"file_type": file_type
}
}
}
try:
result = general_materializer.get(metadata=file_metadata).to_csv(index=False)
except Exception as e:
self._logger.debug(e, exc_info=True)
raise ValueError("Loading online data from " + input_dir + " failed!")
# remove last \n so that we will not get an extra useless row
if result[-1] == "\n":
result = result[:-1]
loaded_data = StringIO(result)
loaded_data = [ | pd.read_csv(loaded_data, dtype=str) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 27 09:20:01 2018
@authors: <NAME>
Last modified: 2020-02-19
------------------------------------------
** Semantic Search Analysis: Start-up **
------------------------------------------
This script: Imports search queries from Google Analytics, cleans them up, and
matches query entries against historical files.
Okay to run all at once, but see the script for instructions for manual operations.
INPUTS:
- data/raw/SearchConsoleNew.csv - log of google.com search results (GA calls "Queries") where person landed on your site
- data/raw/SiteSearchNew.csv - log from your site search (GA calls "Search Terms")
- data/matchFiles/SiteSpecificMatches.xlsx - From YOUR custom clustering of terms that won't be in UMLS
- data/matchFiles/PastMatches.xlsx - Historical file of vetted successful matches
- data/matchFiles/UmlsMesh.xlsx - Free-to-use controlled vocabulary - MeSH - with UMLS Semantic Types
OUTPUTS:
- data/interim/01_CombinedSearchFullLog.xlsx - Lightly modified full log before changes
- data/interim/ForeignUnresolved.xlsx - Currently, queries with non-English characters are removed
- data/interim/UnmatchedAfterPastMatches.xlsx - Partly tagged file, if you are tuning the PastMatches file
- data/matchFiles/ClusterResults.xlsx - Unmatched terms, top CLUSTERS - update matchFiles in batches
- data/interim/ManualMatch.xlsx - Unmatched terms, top FREQUENCY COUNTS - update matchFiles one at a time
- data/interim/LogAfterJournals.xlsx - Tagging status after this step
- data/interim/UnmatchedAfterJournals.xlsx - What still needs to be tagged after this step.
-------------------------------
HOW TO EXPORT YOUR SOURCE DATA
-------------------------------
Script assumes Google Analytics where search logging has been configured. Can
be adapted for other tools. This method AVOIDS personally identifiable
information ENTIRELY.
1. Set date parameters (Consider 1 month)
2. Go to Acquisition > Search Console > Queries
3. Select Export > Unsampled Report as SearchConsoleNew.csv
4. Copy the result to data/raw folder
5. Do the same from Behavior > Site Search > Search Terms with file name
SiteSearchNew.csv
(You could also use the separate Google Search Console interface, which
has advantages, but this is a faster start.)
----------------
SCRIPT CONTENTS
----------------
1. Start-up / What to put into place, where
2. Create dataframe from query log; globally update columns and rows
3. Assign terms with non-English characters to ForeignUnresolved
4. Make special-case assignments with F&R, RegEx: Bibliographic, Numeric, Named entities
5. Ignore everything except one program/product/service term
6. Exact-match to site-specific and vetted past matches
7. Eyeball results; manually classify remaining "brands" into SiteSpecificMatches
* PROJECT STARTUP - OPTIONAL: UPDATE SITE-SPECIFIC MATCHES AND RE-RUN TO THIS POINT *
8. Exact-match to UmlsMesh
9. Exact match to journal file (necessary for pilot site, replace with your site-specific need)
10. MANUAL PROCESS: Re-cluster, update SiteSpecificMatches.xlsx, re-run
11. MANUALLY add matches from ManualMatch.xlsx for high-frequency unclassified
12. Write out LogAfterJournals and UnmatchedAfterJournals
13. Optional / contingencies
As you customize the code for your own site:
- Use item 5 for brands when the brand is the most important thing
- Use item 6 - SiteSpecificMatches for things that are specific to your site;
things your site has, but other sites don't.
- Use item 6 - PastMatches, for generic terms that would be relevant
to any health-medical site.
"""
#%%
# ============================================
# 1. Start-up / What to put into place, where
# ============================================
'''
File locations, etc.
'''
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import pie, axis, show
import matplotlib.ticker as mtick # used for example in 100-percent bars chart
import numpy as np
import os
import re
import string
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import collections
import copy
from pathlib import *
# To be used with str(Path.home())
# Set working directory and directories for read/write
home_folder = str(Path.home()) # os.path.expanduser('~')
os.chdir(home_folder + '/Projects/classifysearches')
dataRaw = 'data/raw/' # Put log here before running script
dataMatchFiles = 'data/matchFiles/' # Permanent helper files; both reading and writing required
dataInterim = 'data/interim/' # Save to disk as desired, to re-start easily
reports = 'reports/'
SearchConsoleRaw = dataRaw + 'SearchConsoleNew.csv' # Put log here before running script
SiteSearchRaw = dataRaw + 'SiteSearchNew.csv' # Put log here before running script
#%%
# ======================================================================
# 2. Create dataframe from query log; globally update columns and rows
# ======================================================================
'''
If you need to concat multiple files, one option is
searchLog = pd.concat([x1, x2, x3], ignore_index=True)
File will have junk rows at top and bottom that this code removes.
'''
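# Hedged sketch of the concat option mentioned above (the glob pattern is an
# assumption, not a project convention): combine several monthly exports before
# the single-file reads below.
# import glob
# monthly = [pd.read_csv(f, sep=',', index_col=False)
#            for f in sorted(glob.glob(dataRaw + 'SearchConsole*.csv'))]
# searchLog = pd.concat(monthly, ignore_index=True)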
# --------------
# SearchConsole
# --------------
SearchConsole = pd.read_csv(SearchConsoleRaw, sep=',', index_col=False) # skiprows=7,
SearchConsole.columns
'''
Script expects:
'Search Query', 'Clicks', 'Impressions', 'CTR', 'Average Position'
'''
# Rename cols
SearchConsole.rename(columns={'Search Query': 'Query',
'Average Position': 'AveragePosition'}, inplace=True)
SearchConsole.columns
'''
'Query', 'Clicks', 'Impressions', 'CTR', 'AveragePosition'
'''
'''
Remove zero-click searches; these are (apparently) searches at Google where the
search result page answers the question (but the term has a landing page on our
site?). Unclear what's going on.
For example, https://www.similarweb.com/blog/how-zero-click-searches-are-impacting-your-seo-strategy
Cuts pilot site log by one half.
'''
SearchConsole = SearchConsole.loc[(SearchConsole['Clicks'] > 0)]
# SearchConsole.shape
# -----------
# SiteSearch
# -----------
SiteSearch = pd.read_csv(SiteSearchRaw, sep=',', index_col=False) # skiprows=7,
SiteSearch.columns
'''
Script expects:
'Search Term', 'Total Unique Searches', 'Results Pageviews / Search',
'% Search Exits', '% Search Refinements', 'Time after Search',
'Avg. Search Depth'
'''
# Rename cols
SiteSearch.rename(columns={'Search Term': 'Query',
'Total Unique Searches': 'TotalUniqueSearches',
'Results Pageviews / Search': 'ResultsPVSearch',
'% Search Exits': 'PercentSearchExits',
'% Search Refinements': 'PercentSearchRefinements',
'Time after Search': 'TimeAfterSearch',
'Avg. Search Depth': 'AvgSearchDepth'}, inplace=True)
SiteSearch.columns
'''
'Query', 'TotalUniqueSearches', 'ResultsPVSearch', 'PercentSearchExits',
'PercentSearchRefinements', 'TimeAfterSearch', 'AvgSearchDepth'
'''
# Join the two df's, keeping all rows and putting terms in common into one row
CombinedLog = pd.merge(SearchConsole, SiteSearch, on = 'Query', how = 'outer')
# New col for total times people searched for term, regardless of location searched from
CombinedLog['TotalSearchFreq'] = CombinedLog.fillna(0)['Clicks'] + CombinedLog.fillna(0)['TotalUniqueSearches']
CombinedLog = CombinedLog.sort_values(by='TotalSearchFreq', ascending=False).reset_index(drop=True)
# Queries longer than 255 char generate an error in Excel. Shouldn't be that
# long anyway; let's cut off at 100 char (still too long but stops the error)
# ?? df.apply(lambda x: x.str.slice(0, 20))
CombinedLog['Query'] = CombinedLog['Query'].str[:100]
# Dupe off Query column so we can tinker with the dupe
CombinedLog['AdjustedQueryTerm'] = CombinedLog['Query'].str.lower()
# -------------------------
# Remove punctuation, etc.
# -------------------------
# Replace hyphen with space because the below would replace with nothing
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace('-', ' ')
# Remove http:// or https:// if used
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace('http://', '')
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace('https://', '')
'''
Regular expressions info from https://docs.python.org/3/library/re.html
^ (Caret.) Matches the start of the string, and in MULTILINE mode also
matches immediately after each newline.
\w For Unicode (str) patterns: Matches Unicode word characters; this
includes most characters that can be part of a word in any language,
as well as numbers and the underscore. If the ASCII flag is used, only
[a-zA-Z0-9_] is matched.
\s For Unicode (str) patterns: Matches Unicode whitespace characters
(which includes [ \t\n\r\f\v], and also many other characters, for
example the non-breaking spaces mandated by typography rules in many
languages). If the ASCII flag is used, only [ \t\n\r\f\v] is matched.
+ Causes the resulting RE to match 1 or more repetitions of the preceding
RE. ab+ will match ‘a’ followed by any non-zero number of ‘b’s; it will
not match just ‘a’.
Spyder editor can somehow lose the regex, such as when it is copied and pasted
inside the editor; an attempt to preserve inside this comment: (r'[^\w\s]+','')
'''
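# Worked example of the cleanup chain (made-up query, not taken from the site log):
# the hyphen swap above plus the [^\w\s]+ removal below turn
# 'covid-19: symptoms & treatment?' into 'covid 19 symptoms  treatment'
# (the doubled space left by '&' is collapsed a few lines further down).
_regex_demo = pd.Series(['covid-19: symptoms & treatment?'])
_regex_demo = _regex_demo.str.replace('-', ' ').str.replace(r'[^\w\s]+', '')
# _regex_demo[0] == 'covid 19 symptoms  treatment'
del _regex_demo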
# Remove all chars except a-zA-Z0-9 and leave foreign chars alone
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace(r'[^\w\s]+', '')
# Remove modified entries that are now dupes or blank entries
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replace(' ', ' ') # two spaces to one
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.strip() # remove leading and trailing spaces
CombinedLog = CombinedLog.loc[(CombinedLog['AdjustedQueryTerm'] != "")]
# Write out this version; won't need most columns until later
writer = pd.ExcelWriter(dataInterim + '01_CombinedSearchFullLog.xlsx')
CombinedLog.to_excel(writer,'CombinedLogFull', index=False)
# df2.to_excel(writer,'Sheet2')
writer.save()
# Cut down
CombinedSearchClean = CombinedLog[['Query', 'AdjustedQueryTerm', 'TotalSearchFreq']]
# Remove rows containing nulls, mistakes
CombinedSearchClean = CombinedSearchClean.dropna()
# Add match cols
CombinedSearchClean['PreferredTerm'] = ''
CombinedSearchClean['SemanticType'] = ''
# Free up memory
del [[SearchConsole, SiteSearch, CombinedLog]]
# CombinedSearchClean.head()
CombinedSearchClean.columns
'''
'Query', 'AdjustedQueryTerm', 'TotalSearchFreq', 'PreferredTerm', 'SemanticType'
'''
#%%
# =================================================================
# 3. Assign terms with non-English characters to ForeignUnresolved
# =================================================================
'''
UMLS MetaMap should not be given anything other than flat ASCII - no foreign
characters, no high-ASCII apostrophes or quotes, etc., at least as of October
2019. Flag these so later you can remove them from processing. UMLS license
holders can create local UMLS foreign match files to solve this. The current
implementation runs without need for a UMLS license (i.e., many vocabularies
have been left out).
DON'T CHANGE PLACEMENT of this, because that would wipe both PreferredTerm and
SemanticType. Future procedures can replace this content with the correct
translation.
FIXME - Some of these are not foreign; R&D how to avoid assigning as foreign;
start by seeing whether orig term had non-ascii characters.
Mistaken assignments that are 1-4-word single-concept searches will be
overwritten with the correct data. And a smaller number of other types will
be reclaimed as well.
- valuation of fluorescence in situ hybridization as an ancillary tool to
urine cytology in diagnosing urothelial carcinoma
- comparison of a light‐emitting diode with conventional light sources for
providing phototherapy to jaundiced newborn infants
- crystal structure of ovalbumin
- diet exercise or diet with exercise 18–65 years old
'''
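# Illustration (made-up queries, not log data): 'blood pressure remedies' survives the
# ascii round-trip below, while 'diabète de type 2' raises UnicodeDecodeError and is
# assigned PreferredTerm/SemanticType = 'Foreign unresolved'.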
# Other unrecognized characters, flag as foreign. Eyeball these once in a while and update the above.
def checkForeign(row):
# print(row)
foreignYes = {'AdjustedQueryTerm':row.AdjustedQueryTerm, 'PreferredTerm':'Foreign unresolved', 'SemanticType':'Foreign unresolved'}
foreignNo = {'AdjustedQueryTerm':row.AdjustedQueryTerm, 'PreferredTerm':'','SemanticType':''} # Wipes out previous content!!
try:
row.AdjustedQueryTerm.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return pd.Series(foreignYes)
else:
return | pd.Series(foreignNo) | pandas.Series |
'''
Pulls data from xml and creates an array for each user consisting of PMID,
type, and annotation. Uses NLTK scoring metrics tools to determine
precision, recall, and f-score. By including PMID in the hash, this version
allows for examining user to user comparisons across multiple documents in the
group. Averages by User in one shot, instead of an average of averages.
Uses userid instead of user_name. Treats one of the users as the test set, the
other user as the gold standard for each pairing.
'''
from django.contrib.auth.models import User
from django.conf import settings
from ..common.formatter import clean_df
from ..common.models import Group
from ..document.models import Document
from .models import Report
from . import synonyms_dict
from nltk.metrics import scores as nltk_scoring
import pandas as pd
import networkx as nx
import itertools
def hashed_er_annotations_df(group_pk, compare_type=True):
"""Generate a Entity Recognition DataFrame with additional hash column
"""
group = Group.objects.get(pk=group_pk)
org_er_df = Document.objects.ner_df(document_pks=group.get_document_pks(), include_pubtator=False)
er_df = clean_df(org_er_df)
if compare_type:
er_df['hash'] = er_df.document_pk.apply(str) + '_' + er_df.ann_type_idx.apply(str) + '_' + er_df.section_offset.apply(str) + '_' + er_df.length.apply(str)
else:
er_df['hash'] = er_df.document_pk.apply(str) + '_' + er_df.section_offset.apply(str) + '_' + er_df.length.apply(str)
return er_df
def compute_pairwise(hashed_er_anns_df):
"""
Returns pairwise comparison between users (user_a & user_b)
that have completed similar documents
"""
# Make user_pks unique
userset = set(hashed_er_anns_df.user_id)
inter_annotator_arr = []
# For each unique user comparison, compute
for user_a, user_b in itertools.combinations(userset, 2):
# The list of document_pks that each user had completed
user_a_set = set(hashed_er_anns_df[hashed_er_anns_df['user_id'] == user_a].document_pk)
user_b_set = set(hashed_er_anns_df[hashed_er_anns_df['user_id'] == user_b].document_pk)
# Only compare documents both users have completed
pmid_set = user_a_set.intersection(user_b_set)
# If user_a and user_b have completed shared PMID, compute comparisons
if len(pmid_set) != 0:
pmid_df = hashed_er_anns_df[hashed_er_anns_df['document_pk'].isin(pmid_set)]
ref_set = set(pmid_df[pmid_df['user_id'] == user_a].hash)
test_set = set(pmid_df[pmid_df['user_id'] == user_b].hash)
# Compute the precision, recall and F-measure based on
# the unique hashes
inter_annotator_arr.append((
user_a,
user_b,
len(pmid_set),
nltk_scoring.precision(ref_set, test_set),
nltk_scoring.recall(ref_set, test_set),
nltk_scoring.f_measure(ref_set, test_set)
))
return pd.DataFrame(inter_annotator_arr, columns=('user_a', 'user_b', 'docs_compared', 'precision', 'recall', 'f-score'))
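def _pairwise_scoring_example():
    """Hedged illustration only -- the hashes below are made up, not project data.
    Shows the set-based scoring used in compute_pairwise: user_a's hashed
    annotations act as the reference set, user_b's as the test set."""
    ref_set = {'1_0_5_3', '1_0_9_4', '1_1_20_6'}    # user_a (gold standard)
    test_set = {'1_0_5_3', '1_1_20_6', '1_1_30_2'}  # user_b (test)
    return (nltk_scoring.precision(ref_set, test_set),  # 2 shared / 3 test  ~ 0.67
            nltk_scoring.recall(ref_set, test_set),     # 2 shared / 3 ref   ~ 0.67
            nltk_scoring.f_measure(ref_set, test_set))  # harmonic mean      ~ 0.67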
def merge_pairwise_comparisons(inter_annotator_df):
"""
Merging user_a and user_b columns for the pairings, since itertools.combinations
ensures that users are paired with each other only once (no reverse pairing)
Args:
inter_annotator_df (pd.DataFrame)
Returns:
pd.DataFrame
"""
# Sort rows by best F-Score at the top
inter_annotator_df.sort_values('f-score', ascending=False, inplace=True)
all_users_arr = []
for group_idx, group in inter_annotator_df.groupby('user_a'):
all_users_arr.append((
group_idx,
group.shape[0],
group['f-score'].sum()
))
for group_idx, group in inter_annotator_df.groupby('user_b'):
all_users_arr.append((
group_idx,
group.shape[0],
group['f-score'].sum()
))
temp_df = | pd.DataFrame(all_users_arr, columns=('user_id', 'pairings', 'total_f')) | pandas.DataFrame |
import pandas as pd
import numpy as np
import torch
import os.path
from glob import glob
from datetime import datetime
from base.torchvision_dataset import TorchvisionDataset
from torch.utils.data import TensorDataset
class HR_Dataset(TorchvisionDataset):
def __init__(self, root:str, normal_class):
super().__init__(root)
self.normal_class = normal_class
# x_array = [[[0 for k in range(3)] for j in range(11932)]]
# load lists of participant ids
# id_fb, id_nfb = load_id('/workspace/HR_WearablesData/')
# id_fb = np.load("/workspace/fitbit_id.npy")
# id_nfb = np.load("/workspace/nonfitbit_id.npy")
# id_anomalies = load_labels('/workspace/datasets/Health New Labeling.xlsx')
# df = load_fitbit_data(id_fb[0])
# x_array = cut_to_same_length(df, x_array)
# y_array = np.zeros(x_array.shape[0])
# index_array = np.arange(x_array.shape[0])
print("start")
dim1_train = pd.read_csv("/workspace/dim1_train.txt").to_numpy()
dim2_train = pd.read_csv("/workspace/dim2_train.txt").to_numpy()
dim3_train = | pd.read_csv("/workspace/dim3_train.txt") | pandas.read_csv |
import pandas as pd
import numpy as np
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
import os
import re
from sklearn.model_selection import train_test_split
import random
import scorecardpy as sc
# split train into train data and test data
# os.chdir(r'D:\GWU\Aihan\DATS 6103 Data Mining\Final Project\Code')
def split_data(inpath, target_name, test_size):
df = pd.read_csv(inpath)
y = df[target_name]
#x = df1.loc[:,df1.columns!='loan_default']
x=df.drop(target_name,axis=1)
# set a random seed (note: the reproducible split actually comes from random_state in train_test_split below)
random.seed(12345)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=1, stratify=y)
training = | pd.concat([X_train, y_train], axis=1) | pandas.concat |
import time
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
class CFE():
def __init__(self, cfg_file=None):
super(CFE, self).__init__()
self.cfg_file = cfg_file
############################################################
# ________________________________________________________ #
# ________________________________________________________ #
# GET VALUES FROM CONFIGURATION FILE. #
#
self.config_from_json() #
#
# GET VALUES FROM CONFIGURATION FILE. #
# ________________________________________________________ #
# ________________________________________________________ #
############################################################
# ________________________________________________
# In order to check mass conservation at any time
self.reset_volume_tracking()
# ________________________________________________
# initialize simulation constants
atm_press_Pa=101325.0
unit_weight_water_N_per_m3=9810.0
# ________________________________________________
# Time control
self.time_step_size = 3600
self.timestep_h = self.time_step_size / 3600.0
self.timestep_d = self.timestep_h / 24.0
self.current_time_step = 0
self.current_time = | pd.Timestamp(year=1970, month=1, day=1, hour=0) | pandas.Timestamp |
import copy
import os
from functools import partial
import joblib
import numpy as np
import optuna
import pandas as pd
import lightgbm as lgbm
from .enums import ProblemType
from .logger import logger
from .metrics import Metrics
from .params import get_params
optuna.logging.set_verbosity(optuna.logging.INFO)
def reduce_memory_usage(df, verbose=True):
# NOTE: Original author of this function is unknown
# if you know the *original author*, please let me know.
numerics = ["int8", "int16", "int32", "int64", "float16", "float32", "float64"]
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024 ** 2
if verbose:
logger.info(
"Mem. usage decreased to {:.2f} Mb ({:.1f}% reduction)".format(
end_mem, 100 * (start_mem - end_mem) / start_mem
)
)
return df
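# Illustrative usage (the file name is an assumption, not part of this module):
# train_df = reduce_memory_usage(pd.read_csv("train.csv"), verbose=True)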
def dict_mean(dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def save_valid_predictions(final_valid_predictions, model_config, target_encoder, output_file_name):
final_valid_predictions = | pd.DataFrame.from_dict(final_valid_predictions, orient="index") | pandas.DataFrame.from_dict |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
| pd.testing.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
#coding:utf-8
import pandas as pd
import numpy as np
# Read personal information
train_agg = pd.read_csv('../data/train_agg.csv',sep='\t')
test_agg = pd.read_csv('../data/test_agg.csv',sep='\t')
agg = | pd.concat([train_agg,test_agg],copy=False) | pandas.concat |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import (DataFrame, Series, Index,
Timestamp, DatetimeIndex,
to_datetime, date_range)
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.compat import product
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(tm.TestCase, TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
self.assertEqual(rs.s[1], 1)
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[ | pd.Timedelta('00:01:00') | pandas.Timedelta |
# Download 1-minute klines (candlesticks) from the Binance online API for backtesting
import requests
import backtrader as bt
import backtrader.analyzers as btanalyzers
import json
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
def get_binance_bars(symbol, interval, startTime, endTime):
url = "https://api.binance.com/api/v3/klines"
startTime = str(int(startTime.timestamp() * 1000))
endTime = str(int(endTime.timestamp() * 1000))
limit = '1000'
req_params = {"symbol" : symbol, 'interval' : interval, 'startTime' : startTime, 'endTime' : endTime, 'limit' : limit}
df = pd.DataFrame(json.loads(requests.get(url, params = req_params).text))
if (len(df.index) == 0):
return None
df = df.iloc[:, 0:6]
df.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume']
df.open = df.open.astype("float")
df.high = df.high.astype("float")
df.low = df.low.astype("float")
df.close = df.close.astype("float")
df.volume = df.volume.astype("float")
df['adj_close'] = df['close']
df.index = [dt.datetime.fromtimestamp(x / 1000.0) for x in df.datetime]
return df
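# Example call (illustrative one-day window; hits the public endpoint, so it needs
# network access and returns at most the 1000 rows allowed by the limit above):
# sample = get_binance_bars('ETHUSDT', '1m', dt.datetime(2020, 11, 23), dt.datetime(2020, 11, 24))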
df_list = []
# Start time of the data
last_datetime = dt.datetime(2020, 11, 23)
while True:
new_df = get_binance_bars('ETHUSDT', '1m', last_datetime, dt.datetime.now()) # fetch 1-minute kline data
if new_df is None:
break
df_list.append(new_df)
last_datetime = max(new_df.index) + dt.timedelta(0, 1)
df = | pd.concat(df_list) | pandas.concat |
'''
@Author: mendeslbruno
Date: 2021-01-26
Descr: Performs some simple analyses for several stocks of the S&P 500 index.
'''
import pandas as pd
import yfinance as yf
import streamlit as st
import datetime as dt
import plotly.graph_objects as go
from plotly.subplots import make_subplots
snp500 = pd.read_csv("datasets/SP500.csv")
symbols = snp500['Symbol'].sort_values().tolist()
ticker = st.sidebar.selectbox(
'Choose a S&P 500 Stock',
symbols)
infoType = st.sidebar.radio(
"Choose an info type",
('Fundamental', 'Technical')
)
stock = yf.Ticker(ticker)
if(infoType == 'Fundamental'):
stock = yf.Ticker(ticker)
info = stock.info
st.title('Company Profile')
st.subheader(info['longName'])
st.markdown('** Sector **: ' + info['sector'])
st.markdown('** Industry **: ' + info['industry'])
st.markdown('** Phone **: ' + info['phone'])
st.markdown('** Address **: ' + info['address1'] + ', ' + info['city'] + ', ' + info['zip'] + ', ' + info['country'])
st.markdown('** Website **: ' + info['website'])
st.markdown('** Business Summary **')
st.info(info['longBusinessSummary'])
fundInfo = {
'Enterprise Value (USD)': info['enterpriseValue'],
'Enterprise To Revenue Ratio': info['enterpriseToRevenue'],
'Enterprise To Ebitda Ratio': info['enterpriseToEbitda'],
'Net Income (USD)': info['netIncomeToCommon'],
'Profit Margin Ratio': info['profitMargins'],
'Forward PE Ratio': info['forwardPE'],
'PEG Ratio': info['pegRatio'],
'Price to Book Ratio': info['priceToBook'],
'Forward EPS (USD)': info['forwardEps'],
'Beta ': info['beta'],
'Book Value (USD)': info['bookValue'],
'Dividend Rate (%)': info['dividendRate'],
'Dividend Yield (%)': info['dividendYield'],
'Five year Avg Dividend Yield (%)': info['fiveYearAvgDividendYield'],
'Payout Ratio': info['payoutRatio']
}
fundDF = pd.DataFrame.from_dict(fundInfo, orient='index')
fundDF = fundDF.rename(columns={0: 'Value'})
st.subheader('Fundamental Info')
st.table(fundDF)
st.subheader('General Stock Info')
st.markdown('** Market **: ' + info['market'])
st.markdown('** Exchange **: ' + info['exchange'])
st.markdown('** Quote Type **: ' + info['quoteType'])
start = dt.datetime.today()-dt.timedelta(2 * 365)
end = dt.datetime.today()
df = yf.download(ticker,start,end)
df = df.reset_index()
fig = go.Figure(
data=go.Scatter(x=df['Date'], y=df['Adj Close'])
)
fig.update_layout(
title={
'text': "Stock Prices Over Past Two Years",
'y':0.9,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'})
st.plotly_chart(fig, use_container_width=True)
marketInfo = {
"Volume": info['volume'],
"Average Volume": info['averageVolume'],
"Market Cap": info["marketCap"],
"Float Shares": info['floatShares'],
"Regular Market Price (USD)": info['regularMarketPrice'],
'Bid Size': info['bidSize'],
'Ask Size': info['askSize'],
"Share Short": info['sharesShort'],
'Short Ratio': info['shortRatio'],
'Share Outstanding': info['sharesOutstanding']
}
marketDF = | pd.DataFrame(data=marketInfo, index=[0]) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six.moves import zip_longest
import copy
import re
from types import GeneratorType
from collections import Counter, defaultdict, Hashable
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import Sequence
from skbio.util import assert_data_frame_almost_equal
from skbio.sequence._sequence import (_single_index_to_slice, _is_single_index,
_as_slice_if_single_index)
class SequenceSubclass(Sequence):
"""Used for testing purposes."""
pass
class TestSequence(TestCase):
def setUp(self):
self.sequence_kinds = frozenset([
str, Sequence, lambda s: np.fromstring(s, dtype='|S1'),
lambda s: np.fromstring(s, dtype=np.uint8)])
def empty_generator():
raise StopIteration()
yield
self.getitem_empty_indices = [
[],
(),
{},
empty_generator(),
# ndarray of implicit float dtype
np.array([]),
np.array([], dtype=int)]
def test_init_default_parameters(self):
seq = Sequence('.ABC123xyz-')
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual('.ABC123xyz-', str(seq))
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(11)))
def test_init_nondefault_parameters(self):
seq = Sequence('.ABC123xyz-',
metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(11)})
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual('.ABC123xyz-', str(seq))
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'id': 'foo', 'description': 'bar baz'})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'quality': range(11)}, index=np.arange(11)))
def test_init_handles_missing_metadata_efficiently(self):
seq = Sequence('ACGT')
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
# initializing from an existing Sequence object should handle metadata
# attributes efficiently on both objects
new_seq = Sequence(seq)
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(new_seq._metadata)
self.assertIsNone(new_seq._positional_metadata)
self.assertFalse(seq.has_metadata())
self.assertFalse(seq.has_positional_metadata())
self.assertFalse(new_seq.has_metadata())
self.assertFalse(new_seq.has_positional_metadata())
def test_init_empty_sequence(self):
# Test constructing an empty sequence using each supported input type.
for s in (b'', # bytes
u'', # unicode
np.array('', dtype='c'), # char vector
np.fromstring('', dtype=np.uint8), # byte vec
Sequence('')): # another Sequence object
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (0, ))
npt.assert_equal(seq.values, np.array('', dtype='c'))
self.assertEqual(str(seq), '')
self.assertEqual(len(seq), 0)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(0)))
def test_init_single_character_sequence(self):
for s in (b'A',
u'A',
np.array('A', dtype='c'),
np.fromstring('A', dtype=np.uint8),
Sequence('A')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (1,))
npt.assert_equal(seq.values, np.array('A', dtype='c'))
self.assertEqual(str(seq), 'A')
self.assertEqual(len(seq), 1)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(1)))
def test_init_multiple_character_sequence(self):
for s in (b'.ABC\t123 xyz-',
u'.ABC\t123 xyz-',
np.array('.ABC\t123 xyz-', dtype='c'),
np.fromstring('.ABC\t123 xyz-', dtype=np.uint8),
Sequence('.ABC\t123 xyz-')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (14,))
npt.assert_equal(seq.values,
np.array('.ABC\t123 xyz-', dtype='c'))
self.assertEqual(str(seq), '.ABC\t123 xyz-')
self.assertEqual(len(seq), 14)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(14)))
def test_init_from_sequence_object(self):
# We're testing this in its simplest form in other tests. This test
# exercises more complicated cases of building a sequence from another
# sequence.
# just the sequence, no other metadata
seq = Sequence('ACGT')
self.assertEqual(Sequence(seq), seq)
# sequence with metadata should have everything propagated
seq = Sequence('ACGT',
metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(4)})
self.assertEqual(Sequence(seq), seq)
# should be able to override metadata
self.assertEqual(
Sequence(seq, metadata={'id': 'abc', 'description': '123'},
positional_metadata={'quality': [42] * 4}),
Sequence('ACGT', metadata={'id': 'abc', 'description': '123'},
positional_metadata={'quality': [42] * 4}))
# subclasses work too
seq = SequenceSubclass('ACGT',
metadata={'id': 'foo',
'description': 'bar baz'},
positional_metadata={'quality': range(4)})
self.assertEqual(
Sequence(seq),
Sequence('ACGT', metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(4)}))
def test_init_from_contiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[:3]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('A*B'))
# we shouldn't own the memory because no copy should have been made
self.assertFalse(seq._owns_bytes)
# can't mutate view because it isn't writeable anymore
with self.assertRaises(ValueError):
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('A*B'))
# mutate bytes (*not* the view)
bytes[0] = 99
# Sequence changed because we are only able to make the view read-only,
# not its source (bytes). This is somewhat inconsistent behavior that
# is (to the best of our knowledge) outside our control.
self.assertEqual(seq, Sequence('c*B'))
def test_init_from_noncontiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[::2]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('ABA'))
# we should own the memory because a copy should have been made
self.assertTrue(seq._owns_bytes)
# mutate bytes and its view
bytes[0] = 99
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('ABA'))
def test_init_no_copy_of_sequence(self):
bytes = np.array([65, 66, 65], dtype=np.uint8)
seq = Sequence(bytes)
# should share the same memory
self.assertIs(seq._bytes, bytes)
# shouldn't be able to mutate the Sequence object's internals by
# mutating the shared memory
with self.assertRaises(ValueError):
bytes[1] = 42
def test_init_empty_metadata(self):
for empty in None, {}:
seq = Sequence('', metadata=empty)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
def test_init_empty_metadata_key(self):
seq = Sequence('', metadata={'': ''})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'': ''})
def test_init_empty_metadata_item(self):
seq = Sequence('', metadata={'foo': ''})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': ''})
def test_init_single_character_metadata_item(self):
seq = Sequence('', metadata={'foo': 'z'})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': 'z'})
def test_init_multiple_character_metadata_item(self):
seq = Sequence('', metadata={'foo': '\nabc\tdef G123'})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': '\nabc\tdef G123'})
def test_init_metadata_multiple_keys(self):
seq = Sequence('', metadata={'foo': 'abc', 42: {'nested': 'metadata'}})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata,
{'foo': 'abc', 42: {'nested': 'metadata'}})
def test_init_empty_positional_metadata(self):
# empty seq with missing/empty positional metadata
for empty in None, {}, pd.DataFrame():
seq = Sequence('', positional_metadata=empty)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(0)))
# non-empty seq with missing positional metadata
seq = Sequence('xyz', positional_metadata=None)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
def test_init_empty_positional_metadata_item(self):
for item in ([], (), np.array([])):
seq = Sequence('', positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(0)))
def test_init_single_positional_metadata_item(self):
for item in ([2], (2, ), np.array([2])):
seq = Sequence('G', positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(1)))
def test_init_multiple_positional_metadata_item(self):
for item in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
(0, 42, 42, 1, 0, 8, 100, 0, 0),
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
seq = Sequence('G' * 9, positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(9)))
def test_init_positional_metadata_multiple_columns(self):
seq = Sequence('^' * 5,
positional_metadata={'foo': np.arange(5),
'bar': np.arange(5)[::-1]})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_init_positional_metadata_with_custom_index(self):
df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
index=['a', 'b', 'c', 'd', 'e'])
seq = Sequence('^' * 5, positional_metadata=df)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_init_invalid_sequence(self):
# invalid dtype (numpy.ndarray input)
with self.assertRaises(TypeError):
# int64
Sequence(np.array([1, 2, 3]))
with self.assertRaises(TypeError):
# |S21
Sequence(np.array([1, "23", 3]))
with self.assertRaises(TypeError):
# object
Sequence(np.array([1, {}, ()]))
# invalid input type (non-numpy.ndarray input)
with self.assertRaisesRegexp(TypeError, 'tuple'):
Sequence(('a', 'b', 'c'))
with self.assertRaisesRegexp(TypeError, 'list'):
Sequence(['a', 'b', 'c'])
with self.assertRaisesRegexp(TypeError, 'set'):
Sequence({'a', 'b', 'c'})
with self.assertRaisesRegexp(TypeError, 'dict'):
Sequence({'a': 42, 'b': 43, 'c': 44})
with self.assertRaisesRegexp(TypeError, 'int'):
Sequence(42)
with self.assertRaisesRegexp(TypeError, 'float'):
Sequence(4.2)
with self.assertRaisesRegexp(TypeError, 'int64'):
Sequence(np.int_(50))
with self.assertRaisesRegexp(TypeError, 'float64'):
Sequence(np.float_(50))
with self.assertRaisesRegexp(TypeError, 'Foo'):
class Foo(object):
pass
Sequence(Foo())
# out of ASCII range
with self.assertRaises(UnicodeEncodeError):
Sequence(u'abc\u1F30')
def test_init_invalid_metadata(self):
for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
with self.assertRaisesRegexp(TypeError,
'metadata must be a dict'):
Sequence('abc', metadata=md)
def test_init_invalid_positional_metadata(self):
# not consumable by Pandas
with self.assertRaisesRegexp(TypeError,
'Positional metadata invalid. Must be '
'consumable by pd.DataFrame. '
'Original pandas error message: '):
Sequence('ACGT', positional_metadata=2)
# 0 elements
with self.assertRaisesRegexp(ValueError, '\(0\).*\(4\)'):
Sequence('ACGT', positional_metadata=[])
# not enough elements
with self.assertRaisesRegexp(ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', positional_metadata=[2, 3, 4])
# too many elements
with self.assertRaisesRegexp(ValueError, '\(5\).*\(4\)'):
Sequence('ACGT', positional_metadata=[2, 3, 4, 5, 6])
# Series not enough rows
with self.assertRaisesRegexp(ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', positional_metadata=pd.Series(range(3)))
# Series too many rows
with self.assertRaisesRegexp(ValueError, '\(5\).*\(4\)'):
Sequence('ACGT', positional_metadata=pd.Series(range(5)))
# DataFrame not enough rows
with self.assertRaisesRegexp(ValueError, '\(3\).*\(4\)'):
Sequence('ACGT',
positional_metadata=pd.DataFrame({'quality': range(3)}))
# DataFrame too many rows
with self.assertRaisesRegexp(ValueError, '\(5\).*\(4\)'):
Sequence('ACGT',
positional_metadata=pd.DataFrame({'quality': range(5)}))
def test_values_property(self):
# Property tests are only concerned with testing the interface
# provided by the property: that it can be accessed, can't be
# reassigned or mutated in place, and that the correct type is
# returned. More extensive testing of border cases (e.g., different
# sequence lengths or input types, odd characters, etc.) is performed
# in Sequence.__init__ tests.
seq = Sequence('ACGT')
# should get back a numpy.ndarray of '|S1' dtype
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
npt.assert_equal(seq.values, np.array('ACGT', dtype='c'))
# test that we can't mutate the property
with self.assertRaises(ValueError):
seq.values[1] = 'A'
# test that we can't set the property
with self.assertRaises(AttributeError):
seq.values = np.array("GGGG", dtype='c')
def test_metadata_property_getter(self):
md = {'foo': 'bar'}
seq = Sequence('', metadata=md)
self.assertIsInstance(seq.metadata, dict)
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
# update existing key
seq.metadata['foo'] = 'baz'
self.assertEqual(seq.metadata, {'foo': 'baz'})
# add new key
seq.metadata['foo2'] = 'bar2'
self.assertEqual(seq.metadata, {'foo': 'baz', 'foo2': 'bar2'})
def test_metadata_property_getter_missing(self):
seq = Sequence('ACGT')
self.assertIsNone(seq._metadata)
self.assertEqual(seq.metadata, {})
self.assertIsNotNone(seq._metadata)
def test_metadata_property_setter(self):
md = {'foo': 'bar'}
seq = Sequence('', metadata=md)
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
new_md = {'bar': 'baz', 42: 42}
seq.metadata = new_md
self.assertEqual(seq.metadata, new_md)
self.assertIsNot(seq.metadata, new_md)
seq.metadata = {}
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_metadata())
def test_metadata_property_setter_invalid_type(self):
seq = Sequence('abc', metadata={123: 456})
for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
pd.DataFrame()):
with self.assertRaisesRegexp(TypeError,
'metadata must be a dict'):
seq.metadata = md
# object should still be usable and its original metadata shouldn't
# have changed
self.assertEqual(seq.metadata, {123: 456})
def test_metadata_property_deleter(self):
md = {'foo': 'bar'}
seq = Sequence('CAT', metadata=md)
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
del seq.metadata
self.assertIsNone(seq._metadata)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
# test deleting again
del seq.metadata
self.assertIsNone(seq._metadata)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
# test deleting missing metadata immediately after instantiation
seq = Sequence('ACGT')
self.assertIsNone(seq._metadata)
del seq.metadata
self.assertIsNone(seq._metadata)
def test_metadata_property_shallow_copy(self):
md = {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]}
seq = Sequence('CAT', metadata=md)
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
# updates to keys
seq.metadata['key1'] = 'new val'
self.assertEqual(seq.metadata,
{'key1': 'new val', 'key2': 'val2', 'key3': [1, 2]})
# original metadata untouched
self.assertEqual(md, {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]})
# updates to mutable value (by reference)
seq.metadata['key3'].append(3)
self.assertEqual(
seq.metadata,
{'key1': 'new val', 'key2': 'val2', 'key3': [1, 2, 3]})
# original metadata changed because we didn't deep copy
self.assertEqual(
md,
{'key1': 'val1', 'key2': 'val2', 'key3': [1, 2, 3]})
def test_positional_metadata_property_getter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
# update existing column
seq.positional_metadata['foo'] = [42, 42, 43]
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 43]}))
# add new column
seq.positional_metadata['foo2'] = [True, False, True]
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 43],
'foo2': [True, False, True]}))
def test_positional_metadata_property_getter_missing(self):
seq = Sequence('ACGT')
self.assertIsNone(seq._positional_metadata)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame(index=np.arange(4)))
self.assertIsNotNone(seq._positional_metadata)
def test_positional_metadata_property_setter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
new_md = pd.DataFrame({'bar': np.arange(3)}, index=['a', 'b', 'c'])
seq.positional_metadata = new_md
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'bar': np.arange(3)}, index=np.arange(3)))
self.assertIsNot(seq.positional_metadata, new_md)
seq.positional_metadata = pd.DataFrame(index=np.arange(3))
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
self.assertFalse(seq.has_positional_metadata())
def test_positional_metadata_property_setter_invalid_type(self):
# More extensive tests for invalid input are in the Sequence.__init__ tests
seq = Sequence('abc', positional_metadata={'foo': [1, 2, 42]})
# not consumable by Pandas
with self.assertRaisesRegexp(TypeError,
'Positional metadata invalid. Must be '
'consumable by pd.DataFrame. '
'Original pandas error message: '):
seq.positional_metadata = 2
# object should still be usable and its original metadata shouldn't
# have changed
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
# wrong length
with self.assertRaisesRegexp(ValueError, '\(2\).*\(3\)'):
seq.positional_metadata = {'foo': [1, 2]}
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
# None isn't valid when using setter (differs from constructor)
with self.assertRaisesRegexp(ValueError, '\(0\).*\(3\)'):
seq.positional_metadata = None
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
def test_positional_metadata_property_deleter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
# test deleting again
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
# test deleting missing positional metadata immediately after
# instantiation
seq = Sequence('ACGT')
self.assertIsNone(seq._positional_metadata)
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
def test_positional_metadata_property_shallow_copy(self):
# define metadata as a DataFrame because this has the potential to have
# its underlying data shared
md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
seq = Sequence('ACA', positional_metadata=md)
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}, index=np.arange(3)))
self.assertIsNot(seq.positional_metadata, md)
# original metadata untouched
orig_md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
assert_data_frame_almost_equal(md, orig_md)
# change values of column (using same dtype)
seq.positional_metadata['foo'] = [42, 42, 42]
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 42]}, index=np.arange(3)))
# original metadata untouched
assert_data_frame_almost_equal(md, orig_md)
# change single value of underlying data
seq.positional_metadata.values[0][0] = 10
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [10, 42, 42]}, index=np.arange(3)))
# original metadata untouched
assert_data_frame_almost_equal(md, orig_md)
# create column of object dtype -- these aren't deep copied
md = pd.DataFrame({'obj': [[], [], []]}, index=['a', 'b', 'c'])
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'obj': [[], [], []]}, index=np.arange(3)))
# mutate list
seq.positional_metadata['obj'][0].append(42)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'obj': [[42], [], []]}, index=np.arange(3)))
# original metadata changed because we didn't do a full deep copy
assert_data_frame_almost_equal(
md,
pd.DataFrame({'obj': [[42], [], []]}, index=['a', 'b', 'c']))
def test_positional_metadata_property_set_column_series(self):
seq_text = 'ACGTACGT'
l = len(seq_text)
seq = Sequence(seq_text, positional_metadata={'foo': range(l)})
seq.positional_metadata['bar'] = pd.Series(range(l-3))
# pandas.Series will be padded with NaN if too short
npt.assert_equal(seq.positional_metadata['bar'],
np.array(list(range(l-3)) + [np.NaN]*3))
seq.positional_metadata['baz'] = pd.Series(range(l+3))
# pandas.Series will be truncated if too long
npt.assert_equal(seq.positional_metadata['baz'],
np.array(range(l)))
def test_positional_metadata_property_set_column_array(self):
seq_text = 'ACGTACGT'
l = len(seq_text)
seq = Sequence(seq_text, positional_metadata={'foo': range(l)})
# array-like objects will fail if wrong size
for array_like in (np.array(range(l-1)), range(l-1),
np.array(range(l+1)), range(l+1)):
with self.assertRaisesRegexp(ValueError,
"Length of values does not match "
"length of index"):
seq.positional_metadata['bar'] = array_like
def test_eq_and_ne(self):
seq_a = Sequence("A")
seq_b = Sequence("B")
self.assertTrue(seq_a == seq_a)
self.assertTrue(Sequence("a") == Sequence("a"))
self.assertTrue(Sequence("a", metadata={'id': 'b'}) ==
Sequence("a", metadata={'id': 'b'}))
self.assertTrue(Sequence("a",
metadata={'id': 'b', 'description': 'c'}) ==
Sequence("a",
metadata={'id': 'b', 'description': 'c'}))
self.assertTrue(Sequence("a", metadata={'id': 'b', 'description': 'c'},
positional_metadata={'quality': [1]}) ==
Sequence("a", metadata={'id': 'b', 'description': 'c'},
positional_metadata={'quality': [1]}))
self.assertTrue(seq_a != seq_b)
self.assertTrue(SequenceSubclass("a") != Sequence("a"))
self.assertTrue(Sequence("a") != Sequence("b"))
self.assertTrue(Sequence("a") != Sequence("a", metadata={'id': 'b'}))
self.assertTrue(Sequence("a", metadata={'id': 'c'}) !=
Sequence("a",
metadata={'id': 'c', 'description': 't'}))
self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
Sequence("a"))
self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
Sequence("a", positional_metadata={'quality': [2]}))
self.assertTrue(Sequence("c", positional_metadata={'quality': [3]}) !=
Sequence("b", positional_metadata={'quality': [3]}))
self.assertTrue(Sequence("a", metadata={'id': 'b'}) !=
Sequence("c", metadata={'id': 'b'}))
def test_eq_sequences_without_metadata_compare_equal(self):
self.assertTrue(Sequence('') == Sequence(''))
self.assertTrue(Sequence('z') == Sequence('z'))
self.assertTrue(
Sequence('ACGT') == Sequence('ACGT'))
def test_eq_sequences_with_metadata_compare_equal(self):
seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'qual': [1, 2, 3, 4]})
seq2 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'qual': [1, 2, 3, 4]})
self.assertTrue(seq1 == seq2)
# order shouldn't matter
self.assertTrue(seq2 == seq1)
def test_eq_sequences_from_different_sources_compare_equal(self):
# sequences that have the same data but are constructed from different
# types of data should compare equal
seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'quality': (1, 2, 3, 4)})
seq2 = Sequence(np.array([65, 67, 71, 84], dtype=np.uint8),
metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'quality': np.array([1, 2, 3,
4])})
self.assertTrue(seq1 == seq2)
def test_eq_type_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = SequenceSubclass('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_metadata_mismatch(self):
# both provided
seq1 = Sequence('ACGT', metadata={'id': 'foo'})
seq2 = Sequence('ACGT', metadata={'id': 'bar'})
self.assertFalse(seq1 == seq2)
# one provided
seq1 = Sequence('ACGT', metadata={'id': 'foo'})
seq2 = Sequence('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_positional_metadata_mismatch(self):
# both provided
seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
seq2 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 5]})
self.assertFalse(seq1 == seq2)
# one provided
seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
seq2 = Sequence('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_sequence_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('TGCA')
self.assertFalse(seq1 == seq2)
def test_eq_handles_missing_metadata_efficiently(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('ACGT')
self.assertTrue(seq1 == seq2)
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq1._metadata)
self.assertIsNone(seq1._positional_metadata)
self.assertIsNone(seq2._metadata)
self.assertIsNone(seq2._positional_metadata)
def test_getitem_gives_new_sequence(self):
seq = Sequence("Sequence string !1@2#3?.,")
self.assertFalse(seq is seq[:])
def test_getitem_with_int_has_positional_metadata(self):
s = "Sequence string !1@2#3?.,"
length = len(s)
seq = Sequence(s, metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("S", {'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': np.array([0])})
self.assertEqual(seq[0], eseq)
eseq = Sequence(",", metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality':
np.array([len(seq) - 1])})
self.assertEqual(seq[len(seq) - 1], eseq)
eseq = Sequence("t", metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': [10]})
self.assertEqual(seq[10], eseq)
def test_single_index_to_slice(self):
a = [1, 2, 3, 4]
self.assertEqual(slice(0, 1), _single_index_to_slice(0))
self.assertEqual([1], a[_single_index_to_slice(0)])
self.assertEqual(slice(-1, None),
_single_index_to_slice(-1))
self.assertEqual([4], a[_single_index_to_slice(-1)])
def test_is_single_index(self):
self.assertTrue(_is_single_index(0))
self.assertFalse(_is_single_index(True))
self.assertFalse(_is_single_index(bool()))
self.assertFalse(_is_single_index('a'))
def test_as_slice_if_single_index(self):
self.assertEqual(slice(0, 1), _as_slice_if_single_index(0))
slice_obj = slice(2, 3)
self.assertIs(slice_obj,
_as_slice_if_single_index(slice_obj))
def test_slice_positional_metadata(self):
seq = Sequence('ABCDEFGHIJ',
positional_metadata={'foo': np.arange(10),
'bar': np.arange(100, 110)})
self.assertTrue( | pd.DataFrame({'foo': [0], 'bar': [100]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy import signal
import os
def get_timedeltas(login_timestamps, return_floats=True):
"""
Helper function that returns the time differences (delta t's) between consecutive logins for a user.
The datetime stamps are passed in as a Series, so this method also works when called on a DataFrame of
customer logins.
Parameters:
login_timestamps (pd.Series): Series of login timestamps for one user. Works both on the binary visit
timeseries returned by construct_binary_visit_series (see above) and on the DataFrame holding the
logins directly.
return_floats (bool): Whether to return the time differences as floats (in days) instead of
pd.Timedelta objects.
Returns:
timedeltas (pd.Series): Series of time differences, either as pd.Timedelta objects or as floats (days).
"""
if len(login_timestamps.index) <= 1:
raise ValueError("Error: For computing time differences, the user must have more than one registered login")
#get the dates on which the customer visited the gym
timedeltas = pd.Series(login_timestamps.diff().values, index=login_timestamps.values)
#realign the series so that a value on a given date represents the time in days until the next visit
timedeltas = timedeltas.shift(-1)
timedeltas.dropna(inplace=True)
if return_floats:
timedeltas = timedeltas / pd.Timedelta(days=1)
return timedeltas
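def _example_get_timedeltas():
    # A minimal usage sketch (synthetic timestamps, not taken from the real login data):
    # three logins two and four days apart yield the float timedeltas [2.0, 4.0].
    logins = pd.Series(pd.to_datetime(["2021-01-01", "2021-01-03", "2021-01-07"]))
    return get_timedeltas(logins, return_floats=True)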
def write_timedeltas_to_file(login_data, filename, is_sorted=False, num_users=None, minimum_deltas=2, verbose=False, compression="infer"):
"""
Function to write timedelta data to a file for HMM analysis.
login_data: pd.DataFrame, login data for analysis
filename: path of the output file to write to
is_sorted: whether login_data is already sorted by customer code and date
num_users: number of sequences to write, default None (= write whole dataset)
minimum_deltas: minimum number of timedeltas a user needs to be included
verbose: whether to print progress information
compression: pandas compression type
"""
if os.path.exists(os.getcwd() + "/" + filename):
print("The file specified already exists. It will be overwritten in the process.")
os.remove(filename)
#get the number of visits for each customer
visit_numbers = login_data["CUST_CODE"].value_counts().astype(int)
#visit number must be larger than minimum_deltas, since we need two timedeltas for HMM estimation
eligibles = visit_numbers[visit_numbers > minimum_deltas]
ineligibles_data = login_data[~login_data.CUST_CODE.isin(eligibles.index)]
login_data_cleaned = login_data.drop(ineligibles_data.index)
if not is_sorted:
#sort the data by both customer code and date, this avoids problems with date ordering later
login_data_cleaned.sort_values(by=["CUST_CODE", "DATE_SAVED"], inplace=True)
num_logins = len(login_data_cleaned.index)
if num_users is None:
num_users = len(eligibles.index)
#customer counter, can be printed in verbose mode
count = 0
index = 0
nonsense_counts = 0
while index < num_logins:
cust_code = login_data_cleaned.iloc[index].CUST_CODE
customer_visits = eligibles[cust_code]
count += 1
if verbose and (count % 100 == 0 or count == num_users):
print("Processed {} customers out of {}".format(count, num_users))
#select logins with the specified customer code
customer_logins = login_data_cleaned.iloc[index:index+customer_visits]
visiting_dates = customer_logins.DATE_SAVED
#extract the timedeltas
timedeltas = get_timedeltas(visiting_dates, return_floats=True)
#since timedeltas involve differencing, the first value will be NaN - we drop it
timedeltas.dropna(inplace=True)
#logins with timedelta under 5 minutes are dropped
thresh = 5 * (1 / (24 * 60))
#drop all timedeltas under the threshold
eligible_tds = timedeltas[timedeltas > thresh]
if len(eligible_tds.index) < minimum_deltas:
nonsense_counts += 1
index += customer_visits
continue
timedeltas_df = eligible_tds.to_frame().T
#mode='a' ensures that the data are appended instead of overwritten
timedeltas_df.to_csv(filename, mode='a', header=False, compression=compression, index=False, sep=";")
if count >= num_users:
break
index += customer_visits
print("Found {} users with too many artefact logins".format(nonsense_counts))
def get_timedelta_sample(login_data, is_sorted=False, num_users=None, minimum_deltas=2, verbose=False):
"""
Function to collect the timedeltas of all eligible users into a single sample for analysis.
login_data: pd.DataFrame, login data for analysis
num_users: number of users to include, default None (= use whole dataset)
minimum_deltas: minimum number of timedeltas a user needs to be included
"""
#get the number of visits for each customer
visit_numbers = login_data["CUST_CODE"].value_counts().astype(int)
#visit number must be larger than minimum_deltas, since we need two timedeltas for HMM estimation
eligibles = visit_numbers[visit_numbers > minimum_deltas]
ineligibles_data = login_data[~login_data.CUST_CODE.isin(eligibles.index)]
login_data_cleaned = login_data.drop(ineligibles_data.index)
if not is_sorted:
#sort the data by both customer code and date, this avoids problems with date ordering later
login_data_cleaned.sort_values(by=["CUST_CODE", "DATE_SAVED"], inplace=True)
num_logins = len(login_data_cleaned.index)
if num_users is None:
num_users = len(eligibles.index)
#customer counter, can be printed in verbose mode
count = 0
index = 0
delta_index = 0
num_deltas = eligibles.sum() - len(eligibles.index)
timedelta_sample = np.zeros(num_deltas)
while index < num_logins:
cust_code = login_data_cleaned.iloc[index].CUST_CODE
customer_visits = eligibles[cust_code]
#select logins with the specified customer code
customer_logins = login_data_cleaned.iloc[index:index+customer_visits]
visiting_dates = customer_logins.DATE_SAVED
#extract the timedeltas
timedeltas = get_timedeltas(visiting_dates, return_floats=True)
#since timedeltas involve differencing, the first value will be NaN - we drop it
timedeltas.dropna(inplace=True)
#insert this customer's timedeltas into the preallocated sample array
try:
timedelta_sample[delta_index:delta_index+customer_visits-1] = timedeltas.values
except:
print("#index: {}".format(index))
print("#length of td vector: {}".format(num_deltas))
count += 1
if count >= num_users:
if verbose:
print("Checked {} customers out of {}".format(count, num_users))
break
if verbose and (count % 100 == 0):
print("Checked {} customers out of {}".format(count, num_users))
index += customer_visits
delta_index += customer_visits - 1
#threshold of 5 minutes to sort out artifact logins
thresh = 5 * (1 / (24 * 60))
td_sample = | pd.Series(timedelta_sample) | pandas.Series |
import pandas as pd
import numpy as np
import torch
from scipy.io import arff
from abc import ABC, abstractmethod
from torch.utils.data import DataLoader, TensorDataset
class BaseADDataset(ABC):
"""Anomaly detection dataset base class."""
def __init__(self, root: str):
super().__init__()
self.root = root # root path to data
self.n_classes = 2 # 0: normal, 1: outlier
self.normal_classes = None # tuple with original class labels that define the normal class
self.outlier_classes = None # tuple with original class labels that define the outlier class
self.train_set = None # must be of type torch.utils.data.Dataset
self.test_set = None # must be of type torch.utils.data.Dataset
@abstractmethod
def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int = 0) -> (
DataLoader, DataLoader):
"""Implement data loaders of type torch.utils.data.DataLoader for train_set and test_set."""
pass
def __repr__(self):
return self.__class__.__name__
class TorchvisionDataset(BaseADDataset):
"""TorchvisionDataset class for datasets already implemented in torchvision.datasets."""
def __init__(self, root: str):
super().__init__(root)
def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int = 0) -> (
DataLoader, DataLoader):
train_loader = DataLoader(dataset=self.train_set, batch_size=batch_size, shuffle=shuffle_train,
num_workers=num_workers)
test_loader = DataLoader(dataset=self.test_set, batch_size=batch_size, shuffle=shuffle_test,
num_workers=num_workers)
return train_loader, test_loader
class SAD_Dataset(TorchvisionDataset):
def __init__(self, root: str, normal_class):
super().__init__(root)
self.n_classes = 2
self.normal_class = normal_class
# train set
#file paths of the 13 per-dimension training data files
url1_train = 'data/sad/SpokenArabicDigitsDimension1_TRAIN.arff'
url2_train = 'data/sad/SpokenArabicDigitsDimension2_TRAIN.arff'
url3_train = 'data/sad/SpokenArabicDigitsDimension3_TRAIN.arff'
url4_train = 'data/sad/SpokenArabicDigitsDimension4_TRAIN.arff'
url5_train = 'data/sad/SpokenArabicDigitsDimension5_TRAIN.arff'
url6_train = 'data/sad/SpokenArabicDigitsDimension6_TRAIN.arff'
url7_train = 'data/sad/SpokenArabicDigitsDimension7_TRAIN.arff'
url8_train = 'data/sad/SpokenArabicDigitsDimension8_TRAIN.arff'
url9_train = 'data/sad/SpokenArabicDigitsDimension9_TRAIN.arff'
url10_train = 'data/sad/SpokenArabicDigitsDimension10_TRAIN.arff'
url11_train = 'data/sad/SpokenArabicDigitsDimension11_TRAIN.arff'
url12_train = 'data/sad/SpokenArabicDigitsDimension12_TRAIN.arff'
url13_train = 'data/sad/SpokenArabicDigitsDimension13_TRAIN.arff'
# get x and y as dataframe
x_dim1_train, target_train = get_data(url1_train)
x_dim2_train, __ = get_data(url2_train)
x_dim3_train, __ = get_data(url3_train)
x_dim4_train, __ = get_data(url4_train)
x_dim5_train, __ = get_data(url5_train)
x_dim6_train, __ = get_data(url6_train)
x_dim7_train, __ = get_data(url7_train)
x_dim8_train, __ = get_data(url8_train)
x_dim9_train, __ = get_data(url9_train)
x_dim10_train, __ = get_data(url10_train)
x_dim11_train, __ = get_data(url11_train)
x_dim12_train, __ = get_data(url12_train)
x_dim13_train, __ = get_data(url13_train)
x_dim1_train = get_features(x_dim1_train)
x_dim2_train = get_features(x_dim2_train)
x_dim3_train = get_features(x_dim3_train)
x_dim4_train = get_features(x_dim4_train)
x_dim5_train = get_features(x_dim5_train)
x_dim6_train = get_features(x_dim6_train)
x_dim7_train = get_features(x_dim7_train)
x_dim8_train = get_features(x_dim8_train)
x_dim9_train = get_features(x_dim9_train)
x_dim10_train = get_features(x_dim10_train)
x_dim11_train = get_features(x_dim11_train)
x_dim12_train = get_features(x_dim12_train)
x_dim13_train = get_features(x_dim13_train)
# combine 13 dimensions of x
x_train = np.dstack([x_dim1_train, x_dim2_train, x_dim3_train, x_dim4_train, x_dim5_train, x_dim6_train, x_dim7_train, x_dim8_train, x_dim9_train, x_dim10_train, x_dim11_train, x_dim12_train, x_dim13_train])
# process output y and produce index
y_train, index_train = get_target(target_train, normal_class)
# train only on normal data, so extract the normal samples
x_final_train, y_final_train, index_final_train = get_training_set(x_train, y_train, index_train)
# print("size: ", x_final_train.shape)
train_set = TensorDataset(torch.Tensor(x_final_train), torch.Tensor(y_final_train), torch.Tensor(index_final_train))
self.train_set = train_set
# set up testing set
url1_test = 'data/sad/SpokenArabicDigitsDimension1_TEST.arff'
url2_test = 'data/sad/SpokenArabicDigitsDimension2_TEST.arff'
url3_test = 'data/sad/SpokenArabicDigitsDimension3_TEST.arff'
url4_test = 'data/sad/SpokenArabicDigitsDimension4_TEST.arff'
url5_test = 'data/sad/SpokenArabicDigitsDimension5_TEST.arff'
url6_test = 'data/sad/SpokenArabicDigitsDimension6_TEST.arff'
url7_test = 'data/sad/SpokenArabicDigitsDimension7_TEST.arff'
url8_test = 'data/sad/SpokenArabicDigitsDimension8_TEST.arff'
url9_test = 'data/sad/SpokenArabicDigitsDimension9_TEST.arff'
url10_test = 'data/sad/SpokenArabicDigitsDimension10_TEST.arff'
url11_test = 'data/sad/SpokenArabicDigitsDimension11_TEST.arff'
url12_test = 'data/sad/SpokenArabicDigitsDimension12_TEST.arff'
url13_test = 'data/sad/SpokenArabicDigitsDimension13_TEST.arff'
x_dim1_test, target_test = get_data(url1_test)
x_dim2_test, __ = get_data(url2_test)
x_dim3_test, __ = get_data(url3_test)
x_dim4_test, __ = get_data(url4_test)
x_dim5_test, __ = get_data(url5_test)
x_dim6_test, __ = get_data(url6_test)
x_dim7_test, __ = get_data(url7_test)
x_dim8_test, __ = get_data(url8_test)
x_dim9_test, __ = get_data(url9_test)
x_dim10_test, __ = get_data(url10_test)
x_dim11_test, __ = get_data(url11_test)
x_dim12_test, __ = get_data(url12_test)
x_dim13_test, __ = get_data(url13_test)
x_dim1_test = get_features(x_dim1_test)
x_dim2_test = get_features(x_dim2_test)
x_dim3_test = get_features(x_dim3_test)
x_dim4_test = get_features(x_dim4_test)
x_dim5_test = get_features(x_dim5_test)
x_dim6_test = get_features(x_dim6_test)
x_dim7_test = get_features(x_dim7_test)
x_dim8_test = get_features(x_dim8_test)
x_dim9_test = get_features(x_dim9_test)
x_dim10_test = get_features(x_dim10_test)
x_dim11_test = get_features(x_dim11_test)
x_dim12_test = get_features(x_dim12_test)
x_dim13_test = get_features(x_dim13_test)
x_final_test = np.dstack([x_dim1_test, x_dim2_test, x_dim3_test, x_dim4_test, x_dim5_test, x_dim6_test, x_dim7_test, x_dim8_test, x_dim9_test, x_dim10_test, x_dim11_test, x_dim12_test, x_dim13_test])
y_final_test, index_test = get_target(target_test, normal_class)
test_set = TensorDataset(torch.Tensor(x_final_test), torch.Tensor(y_final_test), torch.Tensor(index_test))
self.test_set = test_set
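def _example_sad_loaders():
    # A minimal usage sketch (assumes the arff files referenced above exist under
    # data/sad/): digit 0 is treated as the normal class, all other digits as outliers.
    dataset = SAD_Dataset(root="data/sad", normal_class=0)
    train_loader, test_loader = dataset.loaders(batch_size=32)
    x_batch, y_batch, idx_batch = next(iter(train_loader))
    return x_batch.shape, y_batch.shape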
def get_data(url):
"""
input: path to arff data file
This function loads the arff file, then converts it into a dataframe.
The dataframe is then split into x and y.
output: x is a dataframe without the last column. y is a series.
"""
loaded = arff.loadarff(url)
df = | pd.DataFrame(loaded[0]) | pandas.DataFrame |
# Copyright (c) 2021 ING Wholesale Banking Advanced Analytics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import collections
import multiprocessing
import warnings
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from ..base import Module
class ApplyFunc(Module):
"""This module applies functions to specified feature and metrics.
Extra parameters (kwargs) can be passed to the apply function.
"""
def __init__(
self,
apply_to_key,
store_key="",
assign_to_key="",
apply_funcs_key="",
features=None,
apply_funcs=None,
metrics=None,
msg="",
):
"""Initialize an instance of ApplyFunc.
:param str apply_to_key: key of the input data to apply funcs to.
:param str assign_to_key: key of the input data to assign function applied-output to. (optional)
:param str store_key: key of the output data to store in the datastore (optional)
:param str apply_funcs_key: key of to-be-applied functions in data to store (optional)
:param list features: list of features to pick up from input data and apply funcs to (optional)
:param list metrics: list of metrics to apply funcs to (optional)
:param str msg: message to print out at start of transform function. (optional)
:param list apply_funcs: functions to apply (list of dicts):
- 'func': function to apply
- 'suffix' (string, optional): suffix added to each metric. default is function name.
- 'prefix' (string, optional): prefix added to each metric.
- 'features' (list, optional): features the function is applied to. Overwrites features above
- 'metrics' (list, optional): metrics the function is applied to. Overwrites metrics above
- 'entire' (boolean, optional): apply function to the entire feature's dataframe of metrics?
- 'args' (tuple, optional): args for 'func'
- 'kwargs' (dict, optional): kwargs for 'func'
"""
super().__init__()
self.apply_to_key = apply_to_key
self.assign_to_key = self.apply_to_key if not assign_to_key else assign_to_key
self.store_key = self.assign_to_key if not store_key else store_key
self.apply_funcs_key = apply_funcs_key
self.features = features or []
self.metrics = metrics or []
self.msg = msg
self.apply_funcs = []
# import applied functions
apply_funcs = apply_funcs or []
for af in apply_funcs:
self.add_apply_func(**af)
def add_apply_func(
self,
func,
suffix=None,
prefix=None,
metrics=[],
features=[],
entire=None,
*args,
**kwargs,
):
"""Add function to be applied to dataframe.
Can call this function after module instantiation to add new functions.
:param func: function to apply
:param suffix: (string, optional) suffix added to each metric. default is function name.
:param prefix: (string, optional) prefix added to each metric.
:param features: (list, optional) features the function is applied to. Overwrites features above
:param metrics: (list, optional) metrics the function is applied to. Overwrites metrics above
:param entire: (boolean, optional) apply function to the entire feature's dataframe of metrics?
:param args: (tuple, optional) args for 'func'
:param kwargs: (dict, optional) kwargs for 'func'
"""
# check inputs
if not callable(func):
raise TypeError("functions in ApplyFunc must be callable objects")
if suffix is not None and not isinstance(suffix, str):
raise TypeError("prefix, and suffix in ApplyFunc must be strings or None.")
if prefix is not None and not isinstance(prefix, str):
raise TypeError("prefix, and suffix in ApplyFunc must be strings or None.")
if not isinstance(metrics, list) or not isinstance(features, list):
raise TypeError("metrics and features must be lists of strings.")
# add function
self.apply_funcs.append(
{
"features": features,
"metrics": metrics,
"func": func,
"entire": entire,
"suffix": suffix,
"prefix": prefix,
"args": args,
"kwargs": kwargs,
}
)
def transform(self, datastore):
"""
Apply functions to specified feature and metrics
Each feature/metric combination is treated as a pandas series
:param datastore: input datastore
:return: updated datastore
:rtype: dict
"""
if self.msg:
self.logger.info(self.msg)
apply_to_data = self.get_datastore_object(
datastore, self.apply_to_key, dtype=dict
)
assign_to_data = self.get_datastore_object(
datastore, self.assign_to_key, dtype=dict, default={}
)
if self.apply_funcs_key:
apply_funcs = self.get_datastore_object(
datastore, self.apply_funcs_key, dtype=list
)
self.apply_funcs += apply_funcs
features = self.get_features(apply_to_data.keys())
num_cores = multiprocessing.cpu_count()
same_key = self.assign_to_key == self.apply_to_key
res = Parallel(n_jobs=num_cores)(
delayed(apply_func_array)(
feature=feature,
metrics=self.metrics,
apply_to_df=self.get_datastore_object(
apply_to_data, feature, dtype=pd.DataFrame
),
assign_to_df=None
if same_key
else self.get_datastore_object(
assign_to_data, feature, dtype=pd.DataFrame, default=pd.DataFrame()
),
apply_funcs=self.apply_funcs,
same_key=same_key,
)
for feature in features
)
new_metrics = {r[0]: r[1] for r in res}
# storage
datastore[self.store_key] = new_metrics
return datastore
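def _example_applyfunc_usage():
    # A minimal usage sketch (hypothetical datastore keys and feature/metric names, not
    # part of the original module), following the apply_funcs format documented in
    # ApplyFunc.__init__: compute the mean of one metric per feature and store the
    # result under a new datastore key.
    module = ApplyFunc(
        apply_to_key="profiles",
        store_key="profiles_mean",
        apply_funcs=[
            {"func": np.mean, "suffix": "mean", "metrics": ["metric1"], "entire": True}
        ],
    )
    datastore = {"profiles": {"feature1": pd.DataFrame({"metric1": [1.0, 2.0, 3.0]})}}
    return module.transform(datastore)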
def apply_func_array(
feature, metrics, apply_to_df, assign_to_df, apply_funcs, same_key
):
"""Apply list of functions to dataframe
Split off for parallelization reasons
:param str feature: feature currently looping over
:param list metrics: list of selected metrics to apply functions to
:param apply_to_df: pandas data frame that function in arr is applied to
:param assign_to_df: pandas data frame the output of function is assigned to
:param apply_funcs: list of functions to apply to
:param same_key: if True, merge apply_to_df and assign_to_df before returning assign_to_df
:return: untion of feature and assign_to_df
"""
if not isinstance(apply_to_df, pd.DataFrame):
raise TypeError(
f'apply_to_df of feature "{feature}" is not a pandas dataframe.'
)
if same_key or (len(assign_to_df.index) == 0 and len(assign_to_df.columns) == 0):
assign_to_df = pd.DataFrame(index=apply_to_df.index)
for arr in apply_funcs:
obj = apply_func(feature, metrics, apply_to_df, arr)
if len(obj) == 0:
# no metrics were found in apply_to_df
continue
for new_metric, o in obj.items():
if isinstance(o, pd.Series):
if len(assign_to_df.index) == len(o) and all(
assign_to_df.index == o.index
):
assign_to_df[new_metric] = o
else:
warnings.warn(
f"{feature}:{new_metric}: df_out and object have inconsistent lengths."
)
else:
# o is number or object, assign to every element of new column
assign_to_df[new_metric] = [o] * len(assign_to_df.index)
if same_key:
assign_to_df = pd.concat([apply_to_df, assign_to_df], axis=1)
return feature, assign_to_df
def apply_func(feature, selected_metrics, df, arr):
"""Apply function to dataframe
:param str feature: feature currently looping over
:param list selected_metrics: list of selected metrics to apply to
:param df: pandas data frame that function in arr is applied to
:param dict arr: dictionary containing the function to be applied to pandas dataframe.
:return: dictionary with outputs of applied-to metric pd.Series
"""
# basic checks of feature
if "features" in arr and len(arr["features"]) > 0:
if feature not in arr["features"]:
return {}
# get func input
keys = list(arr.keys())
assert "func" in keys, "function input is insufficient."
func = arr["func"]
if "prefix" not in keys or arr["prefix"] is None:
arr["prefix"] = ""
if len(arr["prefix"]) > 0 and not arr["prefix"].endswith("_"):
arr["prefix"] = arr["prefix"] + "_"
if "suffix" not in keys or arr["suffix"] is None:
arr["suffix"] = func.__name__ if len(arr["prefix"]) == 0 else ""
if len(arr["suffix"]) > 0 and not arr["suffix"].startswith("_"):
arr["suffix"] = "_" + arr["suffix"]
suffix = arr["suffix"]
prefix = arr["prefix"]
args = ()
kwargs = {}
if "kwargs" in keys:
kwargs = arr["kwargs"]
if "args" in keys:
args = arr["args"]
# apply func
if len(selected_metrics) > 0 or ("metrics" in keys and len(arr["metrics"]) > 0):
metrics = (
arr["metrics"]
if ("metrics" in keys and len(arr["metrics"]) > 0)
else selected_metrics
)
metrics = [m for m in metrics if m in df.columns]
# assert all(m in df.columns for m in metrics)
if len(metrics) == 0:
return {}
df = df[metrics] if len(metrics) >= 2 else df[metrics[0]]
if (
"entire" in arr
and arr["entire"] is not None
and arr["entire"] is not False
and arr["entire"] != 0
):
obj = func(df, *args, **kwargs)
else:
obj = df.apply(func, args=args, **kwargs)
# convert object to dict format
if not isinstance(
obj, (pd.Series, pd.DataFrame, list, tuple, np.ndarray)
) and isinstance(df, pd.Series):
obj = {df.name: obj}
elif not isinstance(
obj, (pd.Series, pd.DataFrame, list, tuple, np.ndarray)
) and isinstance(df, pd.DataFrame):
obj = {"_".join(df.columns): obj}
elif (
isinstance(obj, (list, tuple, np.ndarray))
and isinstance(df, pd.DataFrame)
and len(df.columns) == len(obj)
):
obj = {c: o for c, o in zip(df.columns, obj)}
elif (
isinstance(obj, (list, tuple, np.ndarray))
and isinstance(df, pd.Series)
and len(df.index) == len(obj)
):
obj = {df.name: pd.Series(data=obj, index=df.index)}
elif (
isinstance(obj, (list, tuple, np.ndarray))
and isinstance(df, pd.DataFrame)
and len(df.index) == len(obj)
):
obj = {"_".join(df.columns): | pd.Series(data=obj, index=df.index) | pandas.Series |
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing, get_col_mapping_ce
class TestInverseTransformCategoryEncoder(unittest.TestCase):
def test_inverse_transform_1(self):
"""
Test no preprocessing
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR']})
original = inverse_transform(train)
pd.testing.assert_frame_equal(original, train)
def test_inverse_transform_2(self):
"""
Test multiple preprocessing
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
test = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'ZZ'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'ZZ'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'ZZ'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', 'ZZ'],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'ZZ'],
'other': ['other', '123', np.nan]})
expected = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'missing'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'missing'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'missing'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', np.nan],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'NaN'],
'other': ['other', '123', np.nan]})
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
enc_onehot = ce.OneHotEncoder(cols=['Onehot1', 'Onehot2']).fit(train)
train_onehot = enc_onehot.transform(train)
enc_binary = ce.BinaryEncoder(cols=['Binary1', 'Binary2']).fit(train_onehot)
train_binary = enc_binary.transform(train_onehot)
enc_ordinal = ce.OrdinalEncoder(cols=['Ordinal1', 'Ordinal2']).fit(train_binary)
train_ordinal = enc_ordinal.transform(train_binary)
enc_basen = ce.BaseNEncoder(cols=['BaseN1', 'BaseN2']).fit(train_ordinal)
train_basen = enc_basen.transform(train_ordinal)
enc_target = ce.TargetEncoder(cols=['Target1', 'Target2']).fit(train_basen, y)
input_dict1 = dict()
input_dict1['col'] = 'Onehot2'
input_dict1['mapping'] = pd.Series(data=['C', 'D', np.nan], index=['C', 'D', 'missing'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'Binary2'
input_dict2['mapping'] = pd.Series(data=['G', 'H', np.nan], index=['G', 'H', 'missing'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'Ordinal2'
input_dict3['mapping'] = pd.Series(data=['K', 'L', np.nan], index=['K', 'L', 'missing'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
result1 = enc_onehot.transform(test)
result2 = enc_binary.transform(result1)
result3 = enc_ordinal.transform(result2)
result4 = enc_basen.transform(result3)
result5 = enc_target.transform(result4)
original = inverse_transform(result5, [enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target, input_dict1,
list_dict])
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_3(self):
"""
Test target encoding
"""
train = pd.DataFrame({'city': ['chicago', 'paris', 'paris', 'chicago', 'chicago'],
'state': ['US', 'FR', 'FR', 'US', 'US'],
'other': ['A', 'A', np.nan, 'B', 'B']})
test = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
expected = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
y = pd.DataFrame(data=[0, 1, 1, 0, 1], columns=['y'])
enc = ce.TargetEncoder(cols=['city', 'state']).fit(train, y)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_4(self):
"""
Test ordinal encoding
"""
train = pd.DataFrame({'city': ['chicago', 'st louis']})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_5(self):
"""
Test inverse_transform with NaN in train and handle_missing='value'; the NaN should be recovered by the ordinal inverse
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_6(self):
"""
test inverse_transform with NaN in train and handle_missing='return_nan'; the NaN should be recovered by the ordinal inverse
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_7(self):
"""
test inverse_transform with handle_missing and handle_unknown both set to 'return_nan'; the unknown category is inverted to NaN
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OrdinalEncoder(handle_missing='return_nan', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_8(self):
"""
test inverse_transform with handle_missing='value' and handle_unknown='return_nan'; the unknown category is inverted to NaN
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_9(self):
"""
test inverse_transform with handle_missing='value' and handle_unknown='return_nan' when the test set contains both NaN and an unknown category
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = enc.inverse_transform(result)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_10(self):
"""
test inverse_transform with multiple ordinal
"""
data = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['a', 'b']})
test = pd.DataFrame({'city': [1, 2, 2],
'state': [1, 2, 2],
'other': ['a', 'b', 'a']})
expected = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['a', 'b', 'a']})
enc = ce.OrdinalEncoder(cols=['city', 'state'])
enc.fit(data)
original = inverse_transform(test, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inverse_transform_11(self):
"""
Test binary encoding
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'paris', 'monaco'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, 'B']})
expected = pd.DataFrame({'city': ['chicago', 'paris', np.nan],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, 'B']})
enc = ce.BinaryEncoder(cols=['city', 'state']).fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inverse_transform_12(self):
"""
test that inverse_transform round-trips data encoded with BaseNEncoder (base=2)
"""
train = pd.Series(list('abcd')).to_frame('letter')
enc = ce.BaseNEncoder(base=2)
result = enc.fit_transform(train)
inversed_result = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, inversed_result)
def test_inverse_transform_13(self):
"""
Test basen encoding
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.BaseNEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_14(self):
"""
test inverse_transform with NaN in train and handle_missing='return_nan' for BaseN encoding; the NaN should be recovered
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.BaseNEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_15(self):
"""
test BaseN inverse_transform with handle_missing='value' and handle_unknown='return_nan'; the unknown category is inverted to NaN
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.BaseNEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_16(self):
"""
test BaseN inverse_transform with handle_missing='value' and handle_unknown='return_nan' when the test set contains both NaN and an unknown category
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]})
enc = ce.BaseNEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_17(self):
"""
test inverse_transform with multiple baseN
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR']})
test = pd.DataFrame({'city_0': [0, 1],
'city_1': [1, 0],
'state_0': [0, 1],
'state_1': [1, 0]})
enc = ce.BaseNEncoder(cols=['city', 'state'], handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
original = inverse_transform(test, enc)
pd.testing.assert_frame_equal(original, train)
def test_inverse_transform_18(self):
"""
Test Onehot encoding
"""
encoder = ce.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inversed_result = inverse_transform(transformed, encoder)
pd.testing.assert_frame_equal(value, inversed_result)
def test_inverse_transform_19(self):
"""
test one-hot inverse_transform with use_cat_names=False
"""
encoder = ce.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inversed_result = inverse_transform(transformed, encoder)
pd.testing.assert_frame_equal(value, inversed_result)
def test_inverse_transform_20(self):
"""
test one-hot inverse_transform with NaN in train and handle_missing='value'; the NaN should be recovered
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OneHotEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_21(self):
"""
test one-hot inverse_transform with NaN in train and handle_missing='return_nan'; the NaN should be recovered
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OneHotEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
| pd.testing.assert_frame_equal(train, original) | pandas.testing.assert_frame_equal |
#%%
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Files are downloaded and manually divided at random into different folders.
The following code is repeated for each folder with the same effect: it parses the files
into pandas data frames and stores all of the data in a single HDF5 file.
(A reusable helper wrapping the repeated steps is sketched after the first block below.)
"""
#%%
os.chdir('./files/train')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = pd.merge(mzid_df,spectra_df,how='left',on=['file','id'])
merged_df = merged_df[['id','seq','mz','intensities']]
#%%
hdf = pd.HDFStore('/home/ubuntu/data/jiahao/files/train.hdf5', mode="w")
hdf.put(value=merged_df, key="df")
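#%%
# The parsing/merging block above is repeated verbatim for each folder below. A possible
# refactor is sketched here (hypothetical helper, not used by the original script): it
# wraps the mzid/mzml parsing and merge steps for a single folder.
def process_folder(folder):
    """Parse all .mzid/.mzml files in `folder` and return the merged DataFrame."""
    os.chdir(folder)
    mzid_entries = []
    for entry in mzid.chain.from_iterable(glob.glob('*.mzid'), use_index=True):
        mzid_entries.append(_parse_mzid_entry(entry))
    file_location, spectrum_ids, seq, mods, rank = zip(*mzid_entries)
    mzid_df = pd.DataFrame({'file': file_location, 'id': spectrum_ids, 'seq': seq})
    all_spectra = []
    for file in np.unique(file_location):
        indexed = mzml.MzML(file)
        for entry in indexed.map(_parse_mzml_entry):
            all_spectra.append((file,) + entry)
    mzml_location, ids, mz, intensities = zip(*all_spectra)
    spectra_df = pd.DataFrame({'file': mzml_location, 'id': ids, 'mz': mz, 'intensities': intensities})
    merged = pd.merge(mzid_df, spectra_df, how='left', on=['file', 'id'])
    return merged[['id', 'seq', 'mz', 'intensities']]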
#%%
os.chdir('./train_1')
mzid_files_1=glob.glob('*.mzid')
indexed_mzid_1 = mzid.chain.from_iterable(mzid_files_1, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_1 = []
for entry in(indexed_mzid_1):
all_mzid_1.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_1)
mzid_df_1 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_1 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_1.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_1)
spectra_df_1 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_1 = pd.merge(mzid_df_1,spectra_df_1,how='left',on=['file','id'])
merged_df_1 = merged_df_1[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_1, key="df1")
# %%
os.chdir('./train_2')
mzid_files_2=glob.glob('*.mzid')
indexed_mzid_2 = mzid.chain.from_iterable(mzid_files_2, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_2 = []
for entry in(indexed_mzid_2):
all_mzid_2.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_2)
mzid_df_2 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_2 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_2.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_2)
spectra_df_2 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_2 = pd.merge(mzid_df_2,spectra_df_2,how='left',on=['file','id'])
merged_df_2 = merged_df_2[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_2, key="df2")
#%%
os.chdir('./train_3')
mzid_files_3 = glob.glob('*.mzid')
indexed_mzid_3 = mzid.chain.from_iterable(mzid_files_3, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_3 = []
for entry in(indexed_mzid_3):
all_mzid_3.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_3)
mzid_df_3 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_3 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_3.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_3)
spectra_df_3 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_3 = pd.merge(mzid_df_3,spectra_df_3,how='left',on=['file','id'])
merged_df_3 = merged_df_3[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_3, key="df3")
#%%
os.chdir('./train_4')
mzid_files_4 = glob.glob('*.mzid')
indexed_mzid_4 = mzid.chain.from_iterable(mzid_files_4, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_4 = []
for entry in(indexed_mzid_4):
all_mzid_4.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_4)
mzid_df_4 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_4 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_4.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_4)
spectra_df_4= pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_4 = pd.merge(mzid_df_4,spectra_df_4,how='left',on=['file','id'])
merged_df_4 = merged_df_4[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_4, key="df4")
#%%
os.chdir('./train_5')
mzid_files_5 = glob.glob('*.mzid')
indexed_mzid_5 = mzid.chain.from_iterable(mzid_files_5, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_5 = []
for entry in(indexed_mzid_5):
all_mzid_5.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_5)
mzid_df_5 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_5 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_5.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_5)
spectra_df_5 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_5 = pd.merge(mzid_df_5,spectra_df_5,how='left',on=['file','id'])
merged_df_5 = merged_df_5[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_5, key="df5")
#%%
os.chdir('./train_6')
mzid_files_6 = glob.glob('*.mzid')
indexed_mzid_6 = mzid.chain.from_iterable(mzid_files_6, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_6 = []
for entry in(indexed_mzid_6):
all_mzid_6.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_6)
mzid_df_6 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_6 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_6.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_6)
spectra_df_6 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_6 = pd.merge(mzid_df_6,spectra_df_6,how='left',on=['file','id'])
merged_df_6 = merged_df_6[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_6, key="df6")
#%%
os.chdir('./train_7')
mzid_files_7 = glob.glob('*.mzid')
indexed_mzid_7 = mzid.chain.from_iterable(mzid_files_7, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_7 = []
for entry in(indexed_mzid_7):
all_mzid_7.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_7)
mzid_df_7 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_7 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_7.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_7)
spectra_df_7 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
import os
from io import StringIO
from typing import List
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import roadrunner
import seaborn as sns
import tellurium as te
from tellurium.roadrunner.extended_roadrunner import ExtendedRoadRunner
from sres import SRES
mpl.use("TkAgg")
roadrunner.Logger.setLevel(roadrunner.Logger.LOG_CRITICAL)
roadrunner.Logger.disableConsoleLogging()
def freeParameters(self):
return ["v_0", "ra_0", "kf_0", "kr_0", "Kma_0", "Kms_0", "Kmp_0", "wa_0", "ms_0",
"mp_0", "v_1", "ri_1", "kf_1", "kr_1", "Kmi_1", "Kms_1", "Kmp_1", "wi_1",
"ms_1", "mp_1", "v_2", "ri1_2", "ri2_2", "ri3_2", "kf_2", "kr_2",
"Kmi1_2", "Kmi2_2", "Kmi3_2", "Kms_2", "Kmp_2", "wi1_2", "wi2_2", "wi3_2",
"ms_2", "mp_2", "v_3", "kf_3", "kr_3", "Kms_3", "Kmp_3", "ms_3", "mp_3"]
ExtendedRoadRunner.freeParameters = freeParameters
r = te.loada("""
function Fi(v, ri, kf, kr, i, s, p, Kmi, Kms, Kmp, wi, ms, mp)
((ri+(1-ri)*(1/(1+i/Kmi)))^wi)*(kf*(s/Kms)^ms-kr*(p/Kmp)^mp)/((1+(s/Kms))^ms+(1+(p/Kmp))^mp-1)
end
function F0(v, kf, kr, s, p, Kms, Kmp, ms, mp)
(kf*(s/Kms)^ms-kr*(p/Kmp)^mp)/((1+(s/Kms))^ms+(1+(p/Kmp))^mp-1)
end
function Fa(v, ra, kf, kr, a, s, p, Kma, Kms, Kmp, wa, ms, mp)
((ra+(1-ra)*((a/Kma)/(1+a/Kma)))^wa)*(kf*(s/Kms)^ms-kr*(p/Kmp)^mp)/((1+(s/Kms))^ms+(1+(p/Kmp))^mp-1)
end
function Fiii(v, ri1, ri2, ri3, kf, kr, i1, i2, i3, s, p, Kmi1, Kmi2, Kmi3, Kms, Kmp, wi1, wi2, wi3, ms, mp)
((ri1+(1-ri1)*(1/(1+i1/Kmi1)))^wi1) * ((ri2+(1-ri2)*(1/(1+i2/Kmi2)))^wi2) * ((ri3+(1-ri3)*(1/(1+i3/Kmi3)))^wi3) * (kf*(s/Kms)^ms-kr*(p/Kmp)^mp)/((1+(s/Kms))^ms+(1+(p/Kmp))^mp-1)
end
model modular_EGFR_current_128()
// Reactions
FreeLigand: -> L; Fa(v_0, ra_0, kf_0, kr_0, Lp, E, L, Kma_0, Kms_0, Kmp_0, wa_0, ms_0, mp_0);
Phosphotyrosine: -> P; Fi(v_1, ri_1, kf_1, kr_1, Mig6, L, P, Kmi_1, Kms_1, Kmp_1, wi_1, ms_1, mp_1);
Ras: -> R; Fiii(v_2, ri1_2, ri2_2, ri3_2, kf_2, kr_2, Spry2, P, E, P, R, Kmi1_2, Kmi2_2, Kmi3_2, Kms_2, Kmp_2, wi1_2, wi2_2, wi3_2, ms_2, mp_2);
Erk: -> E; F0(v_3, kf_3, kr_3, R, E, Kms_3, Kmp_3, ms_3, mp_3);
// Species IVs
Lp = 100;
E = 0;
L = 1000;
Mig6 = 100;
P = 0;
Spry2 = 10000;
R = 0;
// Parameter values
v_0 = 1;
ra_0 = 1;
kf_0 = 1;
kr_0 = 1;
Kma_0 = 1;
Kms_0 = 1;
Kmp_0 = 1;
wa_0 = 1;
ms_0 = 1;
mp_0 = 1;
v_1 = 1;
ri_1 = 1;
kf_1 = 1;
kr_1 = 1;
Kmi_1 = 1;
Kms_1 = 1;
Kmp_1 = 1;
wi_1 = 1;
ms_1 = 1;
mp_1 = 1;
v_2 = 1;
ri1_2 = 1;
ri2_2 = 1;
ri3_2 = 1;
kf_2 = 1;
kr_2 = 1;
Kmi1_2 = 1;
Kmi2_2 = 1;
Kmi3_2 = 1;
Kms_2 = 1;
Kmp_2 = 1;
wi1_2 = 1;
wi2_2 = 1;
wi3_2 = 1;
ms_2 = 1;
mp_2 = 1;
v_3 = 1;
kf_3 = 1;
kr_3 = 1;
Kms_3 = 1;
Kmp_3 = 1;
ms_3 = 1;
mp_3 = 1;
end
""")
dataValues = np.genfromtxt(StringIO("""time,L,E,P,R
0,1000,0,0,0
100,887.381,809.746,156.585,2990.51
200,777.382,1651.78,143.552,2731.75
300,678.584,2462.54,131.11,2485.62
400,607.807,3018.03,121.584,2297.79
500,567.493,3204.74,115.929,2186.58
600,545.099,3229.01,112.75,2124.17
700,531.456,3202.71,110.814,2086.18
800,522.145,3164.21,109.495,2060.33
900,515.219,3126.44,108.515,2041.13
1000,509.802,3093.29,107.748,2026.11"""), delimiter=",", skip_header=True)
dataValues_df = pd.DataFrame(dataValues, columns=["time", "L", "E", "P", "R"])
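# A minimal, illustrative sketch of a least-squares objective that could be
# minimised (e.g. with the imported SRES optimiser) to fit the model's free
# parameters against dataValues; it is not the script's actual fitting code.
# Parameter ordering follows freeParameters() defined above; the selections,
# the 0..1000 time grid and the function name are assumptions chosen to match
# the observation table.
def sum_of_squared_residuals(parameter_values):
    r.resetToOrigin()
    for name, value in zip(r.freeParameters(), parameter_values):
        r.setValue(name, value)
    # simulate on the same 0..1000 grid (11 points) as the observations
    r.timeCourseSelections = ['time', 'L', 'E', 'P', 'R']
    sim = np.array(r.simulate(0, 1000, 11))
    # compare every observed column except time
    return float(np.sum((sim[:, 1:] - dataValues[:, 1:]) ** 2))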
# flake8: noqa
import os
from carla import log
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import warnings
import pandas as pd
warnings.simplefilter(action="ignore", category=FutureWarning)
import argparse
from typing import Dict, Optional
import numpy as np
import yaml
from tensorflow import Graph, Session
from carla.data.api import Data
from carla.data.catalog import DataCatalog
from carla.evaluation import Benchmark
from carla.models.api import MLModel
from carla.models.catalog import MLModelCatalog
from carla.models.negative_instances import predict_negative_instances
from carla.recourse_methods import *
from carla.recourse_methods.api import RecourseMethod
def save_result(result: pd.DataFrame, alt_path: Optional[str]) -> None:
data_home = os.environ.get("CF_DATA", os.path.join("~", "carla", "results"))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
path = os.path.join(data_home, "results.csv") if alt_path is None else alt_path
result.to_csv(path, index=False)
def load_setup() -> Dict:
with open("experimental_setup.yaml", "r") as f:
setup_catalog = yaml.safe_load(f)
return setup_catalog["recourse_methods"]
def initialize_recourse_method(
method: str,
mlmodel: MLModel,
data: Data,
data_name: str,
model_type: str,
setup: Dict,
sess: Session = None,
) -> RecourseMethod:
if method not in setup.keys():
raise KeyError("Method not in experimental setup")
hyperparams = setup[method]["hyperparams"]
if method == "ar":
coeffs, intercepts = None, None
if model_type == "linear":
# get weights and bias of linear layer for negative class 0
coeffs = mlmodel.raw_model.layers[0].get_weights()[0][:, 0]
intercepts = np.array(mlmodel.raw_model.layers[0].get_weights()[1][0])
ar = ActionableRecourse(mlmodel, hyperparams, coeffs, intercepts)
act_set = ar.action_set
# some datasets need special configuration for possible actions
if data_name == "give_me_some_credit":
act_set["NumberOfTimes90DaysLate"].mutable = False
act_set["NumberOfTimes90DaysLate"].actionable = False
act_set["NumberOfTime60-89DaysPastDueNotWorse"].mutable = False
act_set["NumberOfTime60-89DaysPastDueNotWorse"].actionable = False
ar.action_set = act_set
return ar
elif "cem" in method:
hyperparams["data_name"] = data_name
return CEM(sess, mlmodel, hyperparams)
elif method == "clue":
hyperparams["data_name"] = data_name
return Clue(data, mlmodel, hyperparams)
elif method == "dice":
return Dice(mlmodel, hyperparams)
elif "face" in method:
return Face(mlmodel, hyperparams)
elif method == "gs":
return GrowingSpheres(mlmodel)
elif method == "revise":
hyperparams["data_name"] = data_name
# variable input layer dimension is first time here available
hyperparams["vae_params"]["layers"] = [
len(mlmodel.feature_input_order)
] + hyperparams["vae_params"]["layers"]
return Revise(mlmodel, data, hyperparams)
elif "wachter" in method:
return Wachter(mlmodel, hyperparams)
else:
raise ValueError("Recourse method not known")
parser = argparse.ArgumentParser(description="Run experiments from paper")
parser.add_argument(
"-d",
"--dataset",
nargs="*",
default=["adult", "compas", "give_me_some_credit"],
choices=["adult", "compas", "give_me_some_credit"],
help="Datasets for experiment",
)
parser.add_argument(
"-t",
"--type",
nargs="*",
default=["ann", "linear"],
choices=["ann", "linear"],
help="Model type for experiment",
)
parser.add_argument(
"-r",
"--recourse_method",
nargs="*",
default=[
"dice",
"ar",
"cem",
"cem-vae",
"clue",
"face_knn",
"face_epsilon",
"gs",
"revise",
"wachter",
],
choices=[
"dice",
"ar",
"cem",
"cem-vae",
"clue",
"face_knn",
"face_epsilon",
"gs",
"revise",
"wachter",
],
help="Recourse methods for experiment",
)
parser.add_argument(
"-n",
"--number_of_samples",
type=int,
default=100,
help="Number of instances per dataset",
)
parser.add_argument(
"-p",
"--path",
type=str,
default=None,
help="Save path for the output csv. If None, the output is written to the cache.",
)
args = parser.parse_args()
setup = load_setup()
results = pd.DataFrame()
path = args.path
session_models = ["cem", "cem-vae"]
torch_methods = ["clue", "wachter", "revise"]
for rm in args.recourse_method:
backend = "tensorflow"
if rm in torch_methods:
backend = "pytorch"
for data_name in args.dataset:
dataset = DataCatalog(data_name)
for model_type in args.type:
log.info("=====================================")
log.info("Recourse method: {}".format(rm))
log.info("Dataset: {}".format(data_name))
log.info("Model type: {}".format(model_type))
if rm in session_models:
graph = Graph()
with graph.as_default():
ann_sess = Session()
with ann_sess.as_default():
mlmodel_sess = MLModelCatalog(dataset, model_type, backend)
factuals_sess = predict_negative_instances(
mlmodel_sess, dataset
)
factuals_sess = factuals_sess.iloc[: args.number_of_samples]
factuals_sess = factuals_sess.reset_index(drop=True)
recourse_method_sess = initialize_recourse_method(
rm,
mlmodel_sess,
dataset,
data_name,
model_type,
setup,
sess=ann_sess,
)
df_benchmark = Benchmark(
mlmodel_sess, recourse_method_sess, factuals_sess
).run_benchmark()
else:
mlmodel = MLModelCatalog(dataset, model_type, backend)
factuals = predict_negative_instances(mlmodel, dataset)
factuals = factuals.iloc[: args.number_of_samples]
factuals = factuals.reset_index(drop=True)
if rm == "dice":
mlmodel.use_pipeline = True
recourse_method = initialize_recourse_method(
rm, mlmodel, dataset, data_name, model_type, setup
)
df_benchmark = Benchmark(
mlmodel, recourse_method, factuals
).run_benchmark()
df_benchmark["Recourse_Method"] = rm
df_benchmark["Dataset"] = data_name
df_benchmark["ML_Model"] = model_type
df_benchmark = df_benchmark[
[
"Recourse_Method",
"Dataset",
"ML_Model",
"Distance_1",
"Distance_2",
"Distance_3",
"Distance_4",
"Constraint_Violation",
"Redundancy",
"y-Nearest-Neighbours",
"Success_Rate",
"Average_Time",
]
]
            results = pd.concat([results, df_benchmark], axis=0)
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from tablizer.inputs import Inputs, Base
from tablizer.defaults import Units, Methods, Fields
from tablizer.tools import create_sqlite_database, check_inputs_table, insert, \
make_session, check_existing_records, delete_records, make_cnx_string
def summarize(array, date, methods, percentiles=[25, 75], decimals=3,
masks=None, mask_zero_values=False):
"""
Calculate basic summary statistics for 2D arrays or DataFrames.
Args
------
array {arr}: 2D array or DataFrame
date {str}: ('2019-8-18 23:00'), anything pd.to_datetime() can parse
methods {list}: (['mean','std']), strings of numpy functions to apply
percentiles {list}: ([low, high]), must supply when using 'percentile'
decimals {int}: rounding
masks {list}: mask outputs
mask_zero_values {bool}: mask zero values in array
Returns
------
result {DataFrame}: index = date, columns = methods
"""
method_options = Methods.options
if not isinstance(methods, list):
raise TypeError("methods must be a list")
if type(array) not in [np.ndarray, pd.core.frame.DataFrame]:
raise Exception('array type {} not valid'.format(type(array)))
if len(array.shape) != 2:
raise Exception('array must be 2D array or DataFrame')
if type(array) == pd.core.frame.DataFrame:
array = array.values
try:
        date_time = pd.to_datetime(date)
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
tsc = Time_Series_Data_Collection(tsd,'time','category')
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_table_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_table_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_table_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_table_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_table(tsd,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_table(tsc,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_arrow_table(tsd,False,False,'ignore',False).to_pandas()
pd.testing.assert_frame_equal(test,df,False)
def test_to_arrow_table_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_arrow_table(tsc,False,False,'ignore').to_pandas()
pd.testing.assert_frame_equal(df,test,False)
test = to_arrow_table(tsc,True,True,'ignore').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
###
def record_batch_to_pandas(self,batchList):
df = None
for i in batchList:
if df is None:
df = i.to_pandas()
continue
df = df.append(i.to_pandas(),ignore_index = True)
return df
def test_to_arrow_batch_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_batch_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_batch_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_batch_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_batch_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_batch_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_record_batch(tsd,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_record_batch(tsc,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
class Test_Parquet_IO:
def test_from_parquet_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pq.write_table(table,'test.parquet')
testData = from_parquet('test.parquet','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.parquet')
def test_from_parquet_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
pq.write_table(table,'test_collection.parquet')
testData = from_parquet('test_collection.parquet','time','category')
assert tsc == testData
os.remove('test_collection.parquet')
###########
def test_to_parquet_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,False,True,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,True,False,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
            timeSeries = to_parquet('test.parquet',tsc,True,True,'ignore')
def test_to_parquet_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
to_parquet(['test.parquet','label.parquet'],tsd,False,False,'ignore',True)
x = pq.read_table('test.parquet').to_pandas()
y = pq.read_table('label.parquet').to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
os.remove('test.parquet')
os.remove('label.parquet')
def test_to_parquet_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
to_parquet(['test.parquet','label.parquet'],tsc,False,False,'ignore',True)
x = pq.read_table('test.parquet').to_pandas()
y = pq.read_table('label.parquet').to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
os.remove('test.parquet')
os.remove('label.parquet')
def test_to_parquet_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
to_parquet('test.parquet',tsd,False,False,'ignore',False)
test = pq.read_table('test.parquet').to_pandas()
        pd.testing.assert_frame_equal(test,df,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
to_parquet('test.parquet',tsc,False,False,'ignore')
test = pq.read_table('test.parquet').to_pandas()
        pd.testing.assert_frame_equal(df,test,check_dtype=False)
to_parquet('test.parquet',tsc,True,True,'ignore')
test = pq.read_table('test.parquet').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
        test = test.reindex(sorted(test.columns), axis=1)
        full = full.reindex(sorted(full.columns), axis=1)
        pd.testing.assert_frame_equal(test,full,check_dtype=False)
os.remove('test.parquet')
class Test_Generator_IO:
def test_from_generator(self):
pass
def test_to_generator(self):
pass
class Test_Feather_IO:
def test_from_feather_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pf.write_feather(table,'test.feather')
testData = from_feather('test.feather','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.feather')
def test_from_feather_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
pf.write_feather(table,'test_collection.feather')
testData = from_feather('test_collection.feather','time','category')
assert tsc == testData
os.remove('test_collection.feather')
###########
def test_to_feather_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
to_feather(
'test.feather',
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
testData = pf.read_table('test.feather').to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
to_feather(
'test.feather',
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
testData = pf.read_table('test.feather').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
os.remove('test.feather')
def test_to_feather_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = | pd.DataFrame(expect_collection_expandTime['remove']) | pandas.DataFrame |
import threading
import time
import datetime
import pandas as pd
from functools import reduce, wraps
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import zscore
import model.queries as qrs
from model.NodesMetaData import NodesMetaData
import utils.helpers as hp
from utils.helpers import timer
import parquet_creation as pcr
import glob
import os
import dask
import dask.dataframe as dd
class Singleton(type):
def __init__(cls, name, bases, attibutes):
cls._dict = {}
cls._registered = []
def __call__(cls, dateFrom=None, dateTo=None, *args):
print('* OBJECT DICT ', len(cls._dict), cls._dict)
if (dateFrom is None) or (dateTo is None):
defaultDT = hp.defaultTimeRange()
dateFrom = defaultDT[0]
dateTo = defaultDT[1]
if (dateFrom, dateTo) in cls._dict:
print('** OBJECT EXISTS', cls, dateFrom, dateTo)
instance = cls._dict[(dateFrom, dateTo)]
else:
print('** OBJECT DOES NOT EXIST', cls, dateFrom, dateTo)
if (len(cls._dict) > 0) and ([dateFrom, dateTo] != cls._registered):
print('*** provide the latest and start thread', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
refresh = threading.Thread(target=cls.nextPeriodData, args=(dateFrom, dateTo, *args))
refresh.start()
elif ([dateFrom, dateTo] == cls._registered):
print('*** provide the latest', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
elif (len(cls._dict) == 0):
print('*** no data yet, refresh and wait', cls, dateFrom, dateTo)
cls.nextPeriodData(dateFrom, dateTo, *args)
instance = cls._dict[(dateFrom, dateTo)]
# keep only a few objects in memory
if len(cls._dict) >= 2:
cls._dict.pop(list(cls._dict.keys())[0])
return instance
def nextPeriodData(cls, dateFrom, dateTo, *args):
print(f'**** thread started for {cls}')
cls._registered = [dateFrom, dateTo]
instance = super().__call__(dateFrom, dateTo, *args)
cls._dict[(dateFrom, dateTo)] = instance
print(f'**** thread finished for {cls}')
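# Illustrative sketch (not part of the original code) of how the Singleton metaclass behaves
# for the loaders defined below: repeated calls with the same period return the cached
# instance, while a new period serves the latest object and refreshes in the background.
# The date strings are assumptions matching the "%Y-%m-%d %H:%M" format the loaders expect.
#
#     a = GeneralDataLoader('2021-03-01 00:00', '2021-03-02 00:00')
#     b = GeneralDataLoader('2021-03-01 00:00', '2021-03-02 00:00')
#     assert a is b   # same cached object for the same (dateFrom, dateTo) key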
class Updater(object):
def __init__(self):
self.StartThread()
@timer
def UpdateAllData(self):
print()
print(f'{datetime.now()} New data is on its way at {datetime.utcnow()}')
print('Active threads:',threading.active_count())
# query period must be the same for all data loaders
defaultDT = hp.defaultTimeRange()
GeneralDataLoader(defaultDT[0], defaultDT[1])
SiteDataLoader(defaultDT[0], defaultDT[1])
PrtoblematicPairsDataLoader(defaultDT[0], defaultDT[1])
SitesRanksDataLoader(defaultDT[0], defaultDT[1])
self.lastUpdated = hp.roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.UpdateAllData) # 1hour
thread.daemon = True
thread.start()
class ParquetUpdater(object):
def __init__(self):
self.StartThread()
@timer
def Update(self):
print('Starting Parquet Updater')
limit = pcr.limit
indices = pcr.indices
        files = glob.glob(r'..\parquet\*')
print('files',files)
file_end = str(int(limit*24))
print('end of file trigger',file_end)
for f in files:
if f.endswith(file_end):
os.remove(f)
        files = glob.glob(r'..\parquet\*')
print('files2',files)
for idx in indices:
j=int((limit*24)-1)
print('idx',idx,'j',j)
for f in files[::-1]:
file_end = str(idx)
end = file_end+str(j)
print('f',f,'end',end)
if f.endswith(end):
new_name = file_end+str(j+1)
                    head = '..\\parquet\\'
final = head+new_name
print('f',f,'final',final)
os.rename(f,final)
j -= 1
jobs = []
limit = 1/24
timerange = pcr.queryrange(limit)
for idx in indices:
thread = threading.Thread(target=pcr.btwfunc,args=(idx,timerange))
jobs.append(thread)
for j in jobs:
j.start()
for j in jobs:
j.join()
# print('Finished Querying')
for idx in indices:
filenames = pcr.ReadParquet(idx,limit)
if idx == 'ps_packetloss':
print(filenames)
plsdf = dd.read_parquet(filenames).compute()
print('Before drops',len(plsdf))
plsdf = plsdf.drop_duplicates()
print('After Drops',len(plsdf))
print('packetloss\n',plsdf)
if idx == 'ps_owd':
owddf = dd.read_parquet(filenames).compute()
print('owd\n',owddf)
if idx == 'ps_retransmits':
rtmdf = dd.read_parquet(filenames).compute()
print('retransmits\n',rtmdf)
if idx == 'ps_throughput':
trpdf = dd.read_parquet(filenames).compute()
print('throughput\n',trpdf)
print('dask df complete')
self.lastUpdated = hp.roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.Update) # 1hour
thread.daemon = True
thread.start()
class GeneralDataLoader(object, metaclass=Singleton):
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.lastUpdated = None
self.pls = pd.DataFrame()
self.owd = pd.DataFrame()
self.thp = pd.DataFrame()
self.rtm = pd.DataFrame()
self.UpdateGeneralInfo()
@property
def dateFrom(self):
return self._dateFrom
@dateFrom.setter
def dateFrom(self, value):
self._dateFrom = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def dateTo(self):
return self._dateTo
@dateTo.setter
def dateTo(self, value):
self._dateTo = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def lastUpdated(self):
return self._lastUpdated
@lastUpdated.setter
def lastUpdated(self, value):
self._lastUpdated = value
@timer
def UpdateGeneralInfo(self):
# print("last updated: {0}, new start: {1} new end: {2} ".format(self.lastUpdated, self.dateFrom, self.dateTo))
self.pls = NodesMetaData('ps_packetloss', self.dateFrom, self.dateTo).df
self.owd = NodesMetaData('ps_owd', self.dateFrom, self.dateTo).df
self.thp = NodesMetaData('ps_throughput', self.dateFrom, self.dateTo).df
self.rtm = NodesMetaData('ps_retransmits', self.dateFrom, self.dateTo).df
self.latency_df = pd.merge(self.pls, self.owd, how='outer')
self.throughput_df = pd.merge(self.thp, self.rtm, how='outer')
all_df = pd.merge(self.latency_df, self.throughput_df, how='outer')
self.all_df = all_df.drop_duplicates()
self.pls_related_only = self.pls[self.pls['host_in_ps_meta'] == True]
self.owd_related_only = self.owd[self.owd['host_in_ps_meta'] == True]
self.thp_related_only = self.thp[self.thp['host_in_ps_meta'] == True]
self.rtm_related_only = self.rtm[self.rtm['host_in_ps_meta'] == True]
self.latency_df_related_only = self.latency_df[self.latency_df['host_in_ps_meta'] == True]
self.throughput_df_related_only = self.throughput_df[self.throughput_df['host_in_ps_meta'] == True]
self.all_df_related_only = self.all_df[self.all_df['host_in_ps_meta'] == True]
self.all_tested_pairs = self.getAllTestedPairs()
self.lastUpdated = datetime.now()
def getAllTestedPairs(self):
all_df = self.all_df[['host', 'ip']]
df = pd.DataFrame(qrs.queryAllTestedPairs([self.dateFrom, self.dateTo]))
df = pd.merge(all_df, df, left_on='ip', right_on='src', how='right')
df = pd.merge(all_df, df, left_on='ip', right_on='dest', how='right', suffixes=('_dest', '_src'))
df.drop_duplicates(keep='first', inplace=True)
df = df.sort_values(['host_src', 'host_dest'])
df['host_dest'] = df['host_dest'].fillna('N/A')
df['host_src'] = df['host_src'].fillna('N/A')
df['source'] = df[['host_src', 'src']].apply(lambda x: ': '.join(x), axis=1)
df['destination'] = df[['host_dest', 'dest']].apply(lambda x: ': '.join(x), axis=1)
# df = df.sort_values(by=['host_src', 'host_dest'], ascending=False)
df = df[['host_dest', 'host_src', 'idx', 'src', 'dest', 'source', 'destination']]
return df
class SiteDataLoader(object, metaclass=Singleton):
genData = GeneralDataLoader()
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.UpdateSiteData()
def UpdateSiteData(self):
        # print('UpdateSiteData >>> ', self.dateFrom, self.dateTo)
pls_site_in_out = self.InOutDf("ps_packetloss", self.genData.pls_related_only)
self.pls_data = pls_site_in_out['data']
self.pls_dates = pls_site_in_out['dates']
owd_site_in_out = self.InOutDf("ps_owd", self.genData.owd_related_only)
self.owd_data = owd_site_in_out['data']
self.owd_dates = owd_site_in_out['dates']
thp_site_in_out = self.InOutDf("ps_throughput", self.genData.thp_related_only)
self.thp_data = thp_site_in_out['data']
self.thp_dates = thp_site_in_out['dates']
rtm_site_in_out = self.InOutDf("ps_retransmits", self.genData.rtm_related_only)
self.rtm_data = rtm_site_in_out['data']
self.rtm_dates = rtm_site_in_out['dates']
self.latency_df_related_only = self.genData.latency_df_related_only
self.throughput_df_related_only = self.genData.throughput_df_related_only
self.sites = self.orderSites()
@timer
def InOutDf(self, idx, idx_df):
print(idx)
in_out_values = []
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo)
for t in ['dest_host', 'src_host']:
meta_df = idx_df.copy()
df = pd.DataFrame(qrs.queryDailyAvg(idx, t, time_list[0], time_list[1])).reset_index()
df['index'] = pd.to_datetime(df['index'], unit='ms').dt.strftime('%d/%m')
df = df.transpose()
header = df.iloc[0]
df = df[1:]
df.columns = ['day-3', 'day-2', 'day-1', 'day']
meta_df = pd.merge(meta_df, df, left_on="host", right_index=True)
three_days_ago = meta_df.groupby('site').agg({'day-3': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
two_days_ago = meta_df.groupby('site').agg({'day-2': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
one_day_ago = meta_df.groupby('site').agg({'day-1': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
today = meta_df.groupby('site').agg({'day': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
site_avg_df = reduce(lambda x,y: pd.merge(x,y, on='site', how='outer'), [three_days_ago, two_days_ago, one_day_ago, today])
site_avg_df.set_index('site', inplace=True)
change = site_avg_df.pct_change(axis='columns')
site_avg_df = pd.merge(site_avg_df, change, left_index=True, right_index=True, suffixes=('_val', ''))
site_avg_df['direction'] = 'IN' if t == 'dest_host' else 'OUT'
in_out_values.append(site_avg_df)
site_df = pd.concat(in_out_values).reset_index()
site_df = site_df.round(2)
return {"data": site_df,
"dates": header}
def orderSites(self):
problematic = []
problematic.extend(self.thp_data.nsmallest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.rtm_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.pls_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.owd_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic = list(set(problematic))
all_df = self.genData.all_df_related_only.copy()
all_df['has_problems'] = all_df['site'].apply(lambda x: True if x in problematic else False)
sites = all_df.sort_values(by='has_problems', ascending=False).drop_duplicates(['site'])['site'].values
return sites
class PrtoblematicPairsDataLoader(object, metaclass=Singleton):
gobj = GeneralDataLoader()
LIST_IDXS = ['ps_packetloss', 'ps_owd', 'ps_retransmits', 'ps_throughput']
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.all_df = self.gobj.all_df_related_only[['ip', 'is_ipv6', 'host', 'site', 'admin_email', 'admin_name', 'ip_in_ps_meta',
'host_in_ps_meta', 'host_index', 'site_index', 'host_meta', 'site_meta']].sort_values(by=['ip_in_ps_meta', 'host_in_ps_meta', 'ip'], ascending=False)
self.df = self.markNodes()
@timer
def buildProblems(self, idx):
print('buildProblems...',idx)
data = []
intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60)
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv)
for i in range(len(time_list)-1):
data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1]))
return data
@timer
def getPercentageMeasuresDone(self, grouped, tempdf):
measures_done = tempdf.groupby('hash').agg({'doc_count':'sum'})
def findRatio(row, total_minutes):
if pd.isna(row['doc_count']):
count = '0'
else: count = str(round((row['doc_count']/total_minutes)*100))+'%'
return count
one_test_per_min = hp.CalcMinutes4Period(self.dateFrom, self.dateTo)
measures_done['tests_done'] = measures_done.apply(lambda x: findRatio(x, one_test_per_min), axis=1)
grouped = pd.merge(grouped, measures_done, on='hash', how='left')
return grouped
# @timer
def markNodes(self):
df = pd.DataFrame()
for idx in hp.INDECES:
tempdf = pd.DataFrame(self.buildProblems(idx))
grouped = tempdf.groupby(['src', 'dest', 'hash']).agg({'value': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
grouped = self.getRelHosts(grouped)
            # zscore based on each pair's value
tempdf['zscore'] = tempdf.groupby('hash')['value'].apply(lambda x: (x - x.mean())/x.std())
# add max zscore so that it is possible to order by worst
max_z = tempdf.groupby('hash').agg({'zscore':'max'}).rename(columns={'zscore':'max_hash_zscore'})
grouped = pd.merge(grouped, max_z, on='hash', how='left')
# zscore based on the whole dataset
grouped['zscore'] = grouped[['value']].apply(lambda x: (x - x.mean())/x.std())
grouped['idx'] = idx
# calculate the percentage of measures based on the assumption that ideally measures are done once every minute
grouped = self.getPercentageMeasuresDone(grouped, tempdf)
# this is not accurate since we have some cases with 4-5 times more tests than expected
# avg_numtests = tempdf.groupby('hash').agg({'doc_count':'mean'}).values[0][0]
# Add flags for some general problems
if (idx == 'ps_packetloss'):
grouped['all_packets_lost'] = grouped['hash'].apply(lambda x: 1 if x in grouped[grouped['value']==1]['hash'].values else 0)
else: grouped['all_packets_lost'] = -1
def checkThreshold(value):
if (idx == 'ps_packetloss'):
if value > 0.05:
return 1
return 0
elif (idx == 'ps_owd'):
if value > 1000 or value < 0:
return 1
return 0
elif (idx == 'ps_throughput'):
if round(value/1e+6, 2) < 25:
return 1
return 0
elif (idx == 'ps_retransmits'):
if value > 100000:
return 1
return 0
grouped['threshold_reached'] = grouped['value'].apply(lambda row: checkThreshold(row))
grouped['has_bursts'] = grouped['hash'].apply(lambda x: 1
if x in tempdf[tempdf['zscore']>5]['hash'].values
else 0)
grouped['src_not_in'] = grouped['hash'].apply(lambda x: 1
if x in grouped[grouped['src'].isin(self.all_df['ip']) == False]['hash'].values
else 0)
grouped['dest_not_in'] = grouped['hash'].apply(lambda x: 1
if x in grouped[grouped['dest'].isin(self.all_df['ip']) == False]['hash'].values
else 0)
grouped['measures'] = grouped['doc_count'].astype(str)+'('+grouped['tests_done'].astype(str)+')'
df = df.append(grouped, ignore_index=True)
df.fillna('N/A', inplace=True)
print(f'Total number of hashes: {len(df)}')
return df
@timer
def getValues(self, probdf):
# probdf = markNodes()
df = pd.DataFrame(columns=['timestamp', 'value', 'idx', 'hash'])
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo)
for item in probdf[['src', 'dest', 'idx']].values:
tempdf = pd.DataFrame(qrs.queryAllValues(item[2], item, time_list[0], time_list[1]))
tempdf['idx'] = item[2]
tempdf['hash'] = item[0]+"-"+item[1]
tempdf['src'] = item[0]
tempdf['dest'] = item[1]
tempdf.rename(columns={hp.getValueField(item[2]): 'value'}, inplace=True)
df = df.append(tempdf, ignore_index=True)
return df
@timer
def getRelHosts(self, probdf):
df1 = pd.merge(self.all_df[['host', 'ip', 'site']], probdf[['src', 'hash']], left_on='ip', right_on='src', how='right')
df2 = pd.merge(self.all_df[['host', 'ip', 'site']], probdf[['dest', 'hash']], left_on='ip', right_on='dest', how='right')
df = pd.merge(df1, df2, on=['hash'], suffixes=('_src', '_dest'), how='inner')
df = df[df.duplicated(subset=['hash'])==False]
df = df.drop(columns=['ip_src', 'ip_dest'])
df = pd.merge(probdf, df, on=['hash', 'src', 'dest'], how='left')
return df
class SitesRanksDataLoader(metaclass=Singleton):
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.all_df = GeneralDataLoader().all_df_related_only
self.locdf = pd.DataFrame.from_dict(qrs.queryNodesGeoLocation(), orient='index').reset_index().rename(columns={'index':'ip'})
self.measures = pd.DataFrame()
self.df = self.calculateRank()
def FixMissingLocations(self):
df = pd.merge(self.all_df, self.locdf, left_on=['ip'], right_on=['ip'], how='left')
df = df.drop(columns=['site_y', 'host_y']).rename(columns={'site_x': 'site', 'host_x': 'host'})
df["lat"] = pd.to_numeric(df["lat"])
df["lon"] = pd.to_numeric(df["lon"])
for i, row in df.iterrows():
if row['lat'] != row['lat'] or row['lat'] is None:
site = row['site']
host = row['host']
lon = df[(df['site']==site)&(df['lon'].notnull())].agg({'lon':'mean'})['lon']
lat = df[(df['site']==site)&(df['lat'].notnull())].agg({'lat':'mean'})['lat']
if lat!=lat or lon!=lon:
lon = df[(df['host']==host)&(df['lon'].notnull())].agg({'lon':'mean'})['lon']
lat = df[(df['host']==host)&(df['lat'].notnull())].agg({'lat':'mean'})['lat']
df.loc[i, 'lon'] = lon
df.loc[i, 'lat'] = lat
return df
def queryData(self, idx):
data = []
intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60)
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv)
for i in range(len(time_list)-1):
data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1]))
return data
def calculateRank(self):
df = pd.DataFrame()
for idx in hp.INDECES:
if len(df) != 0:
df = pd.merge(df, self.calculateStats(idx), on=['site', 'lat', 'lon'], how='outer')
else: df = self.calculateStats(idx)
        # sum all ranks across the metrics and order the sites by the total
filter_col = [col for col in df if col.endswith('rank')]
df['rank'] = df[filter_col].sum(axis=1)
df = df.sort_values('rank')
df['rank1'] = df['rank'].rank(method='max')
filter_col = [col for col in df if col.endswith('rank')]
df['size'] = df[filter_col].apply(lambda row: 1 if row.isnull().any() else 3, axis=1)
return df
def getPercentageMeasuresDone(self, grouped, tempdf):
measures_done = tempdf.groupby(['src', 'dest']).agg({'doc_count':'sum'})
def findRatio(row, total_minutes):
if pd.isna(row['doc_count']):
count = '0'
else: count = round((row['doc_count']/total_minutes)*100)
return count
one_test_per_min = hp.CalcMinutes4Period(self.dateFrom, self.dateTo)
measures_done['tests_done'] = measures_done.apply(lambda x: findRatio(x, one_test_per_min), axis=1)
grouped = pd.merge(grouped, measures_done, on=['src', 'dest'], how='left')
return grouped
def calculateStats(self, idx):
"""
        For a given index, compute the per-site average value and then rank each site based on it.
"""
ldf = self.FixMissingLocations()
merge_on = {'in': 'dest', 'out': 'src'}
result = pd.DataFrame()
df = pd.DataFrame(self.queryData(idx))
df['idx'] = idx
self.measures = self.measures.append(df)
gdf = df.groupby(['src', 'dest', 'hash']).agg({'value': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
df = self.getPercentageMeasuresDone(gdf, df)
df['tests_done'] = df['tests_done'].apply(lambda val: 101 if val>100 else val)
for direction in ['in', 'out']:
# Merge location df with all 1-hour-averages for the given direction, then get the mean for the whole period
tempdf = pd.merge(ldf[['ip', 'site', 'site_meta', 'lat', 'lon']], df, left_on=['ip'], right_on=merge_on[direction], how='inner')
grouped = tempdf.groupby(['site', 'lat', 'lon']).agg({'value': lambda x: x.mean(skipna=False),
'tests_done': lambda x: round(x.mean(skipna=False))}, axis=1).reset_index()
# The following code checks the percentage of values > 3 sigma, which would show the site has bursts
tempdf['zscore'] = tempdf.groupby('site')['value'].apply(lambda x: (x - x.mean())/x.std())
bursts_percentage = tempdf.groupby('site')['zscore'].apply(lambda c: round(((np.abs(c)>3).sum()/len(c))*100,2))
grouped = pd.merge(grouped, bursts_percentage, on=['site'], how='left')
# In ps_owd there are cases of negative values.
asc = True
if idx == 'ps_owd':
grouped['value'] = grouped['value'].apply(lambda val: grouped['value'].max()+np.abs(val) if val<0 else val)
elif idx == 'ps_throughput':
                # throughput sites should be ranked descending, since higher values are better
asc = False
# Sum site's ranks based on their AVG value + the burst %
grouped['rank'] = grouped['value'].rank(ascending=asc) + grouped['zscore'].rank(method='max')
# grouped = grouped.sort_values('tests_done')
# grouped['rank'] = grouped['rank'] + grouped['tests_done'].rank(ascending=False)
grouped = grouped.rename(columns={'value':f'{direction}_{idx}_avg',
'zscore':f'{direction}_{idx}_bursts_percentage',
'rank':f'{direction}_{idx}_rank',
'tests_done':f'{direction}_{idx}_tests_done_avg'})
if len(result) != 0:
# Merge directions IN and OUT in a single df
result = | pd.merge(result, grouped, on=['site', 'lat', 'lon'], how='outer') | pandas.merge |
# flu prediction
import os
import pandas as pd
import feather
from utils.fastai.structured import *
from utils.fastai.column_data import *
from sklearn import preprocessing
from sklearn.metrics import classification_report, confusion_matrix
import keras
from keras.layers import Input, Embedding, Dense, Dropout
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import metrics
pd.set_option('display.width', 250)
data_path = os.environ['DATA_DIR'] + 'epidata_flu/'
def drop_columns(df, cols):
"""drop columns form dataframe"""
df = df.drop(cols, axis=1)
return df
def show_prediction(model, raw_df, epiyear):
"""
compare prediction from actual values given epiyear
"""
def proc_df(df, max_n_cat=None, mapper=None):
""" standardizes continuous columns and numericalizes categorical columns
Parameters:
-----------
df: The data frame you wish to process.
max_n_cat: The maximum number of categories to break into dummy values, instead
of integer codes.
    mapper: calculates the values used for scaling of variables during training time (mean
and standard deviation).
Returns:
--------
x: x is the transformed version of df. x will not have the response variable
and is entirely numeric.
mapper: A DataFrameMapper which stores the mean and standard deviation of the
    corresponding continuous variables which is then used for scaling during test-time.
"""
df = df.copy()
mapper = scale_vars(df, mapper)
for n, c in df.items():
numericalize(df, c, n, max_n_cat)
return pd.get_dummies(df, dummy_na=True), mapper
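# A minimal usage sketch for proc_df (illustrative only; train_df and test_df are assumed
# pandas DataFrames and are not defined in this module). The mapper fitted on the training
# frame is reused so the test frame is scaled with the same statistics:
#
#     train_x, mapper = proc_df(train_df)
#     test_x, _ = proc_df(test_df, mapper=mapper)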
def concat_prior(df, cols, shift_num=4):
"""
shift dataframe forward to compute prior epiweek features
returns a dataframe with concatenated prior features
cols is a list of columns to shift
shift_num is how many prior weeks to shift
"""
# add concatenated features
df_grp = df.groupby(['region'])
df = []
for name, grp in df_grp:
grp = grp.sort_values(['epiyear', 'epiweeknum'], ascending=True).reset_index(drop=True)
grp_ = [grp]
for idx in range(1, shift_num+1):
grp_.append(grp[cols].shift(idx))
            grp_[idx].columns = ['{}_prior_{}'.format(c, idx) for c in cols]
grp = pd.concat(grp_, axis=1).loc[shift_num:]
df.append(grp)
return pd.concat(df).fillna(0)
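# Illustrative call of concat_prior (the column names are assumptions, not taken from the
# data set): shifting the listed columns by up to 4 epiweeks per region adds lagged
# features such as 'wili_prior_1' ... 'wili_prior_4':
#
#     df_lagged = concat_prior(flu_df, cols=['wili', 'num_ili'], shift_num=4)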
def get_ml_data(recompute=False):
"""
preprocess data for ml training
    num_shift is the number of prior epiweeks to concatenate for feature columns
"""
ml_data_path = data_path + 'ml_data/'
feather_names = ['train_x', 'train_y', 'val_x', 'val_y', 'test_x', 'test_y', 'train_xp']
mapper_names = ['mapper', 'le_wili', 'le_epi']
var_names = ['cat_vars', 'contin_vars', 'wiki_cols', 'pred_wili', 'pred_epi', 'pred_vars']
if not os.path.exists(ml_data_path):
os.makedirs(ml_data_path)
# read flu dataframe and shuffle
df = | pd.read_feather(data_path + 'joined_df.feather') | pandas.read_feather |
# -*- coding: utf-8 -*-
import logging
import numpy as np
import pandas as pd
import pymc3 as pm
from scipy.stats import wilcoxon
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.base import BaseEstimator, ClassifierMixin
from pkg_resources import get_distribution, DistributionNotFound
from lantiseaa.buffer import BaseBuffer, MemoryBuffer, LocalBuffer
from lantiseaa.extractor import BaseTSFeatureExtractor, TsfreshTSFeatureExtractor
from lantiseaa.baseline import BOWMNB
from lantiseaa.ts import *
try:
# Change here if project is renamed and does not equal the package name
dist_name = 'LanTiSEAA'
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = 'unknown'
finally:
del get_distribution, DistributionNotFound
class LanTiSEAA(BaseEstimator, ClassifierMixin):
"""
    The LanTiSEAA class uses time series transformers, a feature extractor and a baseline method
(or baseline predictions) to transform texts; extract, combine, and select features;
(compute baseline predictions); combine baseline predictions with time series
features; and train the meta-classifier for making predictions.
    LanTiSEAA itself is a classifier and can be used within the sklearn framework.
...
Attributes
----------
feature_groups_ : List
a list of names for the feature groups combined together
relevant_features_ : pandas.DataFrame
the relevant features table selected by the TSFeatureExtractor
Methods
-------
get_combined_features(self, train_test='train', surfix=None)
Get features from the methods in feature_groups_ attribute and combine
get_classes(self, classes=None, clf=None)
Get classes from the clf or use the classes parameter. If classes is given,
classes will be returned. Otherwise if clf is None, it will try to retrieve
the classes_ attribute from the meta_classifier. If failed, None will be
returned.
fit(self, X, y, baseline_prediction=None, classes=None, \
baseline_clf_fit_kwargs={}, baseline_clf_predict_kwargs={}, \
baseline_clf_predict_proba_kwargs={}, meta_clf_fit_kwargs={})
Fit on the given training data set, extract features and train the classifier
precompute_X(self, X, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={})
Precompute X, prepare a feature matrix for meta-classifier to make predictions
predict(self, X, X_precomputed=None, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={}, meta_clf_predict_kwargs={})
Make predictions on the given testing data set
predict_proba(self, X, X_precomputed=None, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={}, meta_clf_predict_proba_kwargs={})
Make probability predictions on the given testing data set
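    Examples
    --------
    A minimal usage sketch (illustrative only; texts_train, y_train and texts_test are
    assumed to be pandas Series of documents and target values and are not defined in
    this module):

        clf = LanTiSEAA()
        clf.fit(texts_train, y_train)
        proba = clf.predict_proba(texts_test)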
"""
def __init__(self, \
ts_transformers=[TokenLenSeqTransformer(), TokenFreqSeqTransformer(), TokenRankSeqTransformer(), TokenLenDistTransformer(), TokenRankDistTransformer()], \
feature_extractor=TsfreshTSFeatureExtractor(), baseline_classifier=BOWMNB(), \
use_predict_proba=True, \
meta_classifier=GradientBoostingClassifier(), buffer=MemoryBuffer()):
"""Construct new LanTiSEAA object
Parameters
----------
ts_transformers : List, optional
a list of ts_transformers. The ts_transformers will be used to map the texts and time series
features will be extracted from each type of the time series and then been combined and selected
(in default the list is five transformers - TokenLenSeqTransformer, TokenFreqSeqTransformer,
TokenRankSeqTransformer, TokenLenDistTransformer, and TokenRankDistTransformer - all under
lantiseaa.ts package).
feature_extractor : lantiseaa.extractor.BaseTSFeatureExtractor, optional
the time series feature extractor for extracting and selecting relevant features (default is
lantiseaa.extractor.TsfreshTSFeatureExtractor).
baseline_classifier : Classifier, optional
the baseline method for making baseline predictions. Ignored in fit, predict and
            predict_proba if baseline_prediction is given in fit. If it is None and baseline_prediction
is not given, baseline will be ignored and only time series features will be used (default
is lantiseaa.nlp.BOWMNB).
use_predict_proba : boolean, optional
either to use predict_proba or predict for the baseline to make predictions. Note that if
use_predict_proba is set to False, you need to encode y labels into integers for the
meta-classifier to work, if the meta-classifier only takes integer features (default is True)
meta_classifier : Classifier, optional
the meta-classifier to be trained on combined features and make predictions (default is
GradientBoostingClassifier from sklearn. Note this is different from the XGBClassifier by XGBoost
we used in the paper due to a generalizability issue with XGBClassifier - the number of classes
should be defined when initializing the object. For better performance, pass in XGBClassifier
instead of GradientBoostingClassifier instead).
buffer : lantiseaa.buffer.BaseBuffer, optional
the buffer used to store the data generated during computation (default is MemoryBuffer).
"""
self.ts_transformers = ts_transformers
self.feature_extractor = feature_extractor
self.baseline_classifier = baseline_classifier
self.use_predict_proba = use_predict_proba
self.meta_classifier = meta_classifier
self.buffer = buffer
self.flags_ = {'baseline_prediction_given_in_fit': False}
def get_combined_features(self, train_test='train', surfix=None):
'''Get features from the methods in feature_groups_ attribute and combine
Parameters
----------
train_test : str
the targeted train/test set to retrieve features from
surfix : str
the surfix for targeting the file/object to retrieve features from
'''
features = []
for feature_group in self.feature_groups_:
features.append(self.buffer.read_feature_set(feature_group, fold_number=None, train_test=train_test, surfix=surfix))
return pd.concat(features, axis=1)
def get_classes(self, classes=None, clf=None):
'''Get classes from the clf or use the classes parameter.
If classes is given, classes will be returned. Otherwise if clf is None,
it will try to retrieve the classes_ attribute from the meta_classifier.
If failed, None will be returned.
'''
# get classes from self.meta_classifier in default
if clf is None:
clf = self.meta_classifier
if classes is None:
try:
return clf.classes_
except AttributeError:
return None
else:
return classes
def fit(self, X, y, baseline_prediction=None, classes=None, \
baseline_clf_fit_kwargs={}, baseline_clf_predict_kwargs={}, \
baseline_clf_predict_proba_kwargs={}, meta_clf_fit_kwargs={}):
"""Fit on the given training data set, extract features and train the classifier
Parameters
----------
X : array-like
the training texts
y : array-like
the true target values
baseline_prediction : array-like, optional
the baseline predictions to be combined with the time series features to train the
            meta-classifier. If it is None, the non-None baseline_classifier will be used to make
baseline predictions (default is None).
classes : list, optional
the classes used as pandas.DataFrame column names when saving baseline_predictions
            made by baseline_classifier. If it is None, the value will be retrieved from
self.baseline_classifier.classes_ if exists. Ignored if use_predict_proba
is False (default is None).
baseline_clf_fit_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling fit. Ignored if
baseline_prediction is not None (default is an empty dictionary).
baseline_clf_predict_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict.
Ignored if baseline_prediction is not None or use_predict_proba is True
(default is an empty dictionary).
baseline_clf_predict_proba_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict_proba.
Ignored if baseline_prediction is not None or use_predict_proba is False
(default is an empty dictionary).
meta_clf_fit_kwargs : dict, Optional
the kwargs to be passed to the meta classifier when calling fit (default is an empty
dictionary).
"""
self.feature_groups_ = []
# if baseline_prediction is given, ignore baseline_classifier
if baseline_prediction is not None:
self.flags_['baseline_prediction_given_in_fit'] = True
if not isinstance(baseline_prediction, pd.DataFrame):
baseline_prediction = pd.DataFrame(data=baseline_prediction)
else:
self.flags_['baseline_prediction_given_in_fit'] = False
if self.baseline_classifier is not None: # if baseline_classifier is given
# compute baseline method
logging.info("Computing baseline method.")
self.baseline_classifier.fit(X, y, **baseline_clf_fit_kwargs)
if self.use_predict_proba:
pred = self.baseline_classifier.predict_proba(X, **baseline_clf_predict_proba_kwargs)
classes = self.get_classes(classes, self.baseline_classifier)
baseline_prediction = pd.DataFrame(data=pred, columns=classes)
else:
pred = self.baseline_classifier.predict(X, **baseline_clf_predict_kwargs)
baseline_prediction = pd.DataFrame(data=pred, columns=['y'])
self.buffer.save_class(self.baseline_classifier, method_name='baseline',
class_name=self.baseline_classifier.__class__.__name__)
# if at lease one of baseline_prediction or baseline_classifier is given so baseline
# predictions was computed
if baseline_prediction is not None:
self.buffer.save_feature_set(baseline_prediction, method_name='baseline', train_test='train')
self.buffer.save_prediction(baseline_prediction, method_name='baseline', train_test='train')
self.feature_groups_.append('baseline')
# compute time series features
for transformer in self.ts_transformers:
logging.info("Computing {} time series.".format(transformer.name))
# map texts into time series
transformer.fit(X.values)
time_series = transformer.transform(X.values)
# extract and save time series features
ts_features = self.feature_extractor.extract_features(time_series)
self.buffer.save_feature_set(ts_features, method_name=transformer.name, train_test='train')
self.feature_groups_.append(transformer.name)
# combine features
X_combined = self.get_combined_features(train_test='train', surfix=None)
combined_name = "_".join(self.feature_groups_)
# select and save relevant features
self.relevant_features_ = self.feature_extractor.select_relevant_features(X_combined, y)
self.buffer.save_feature_relevance_table(self.relevant_features_, method_name=combined_name)
X_relevant = X_combined[self.relevant_features_.feature]
# train and save meta-classifier
self.meta_classifier.fit(X_relevant, y, **meta_clf_fit_kwargs)
self.buffer.save_class(self.meta_classifier, method_name='meta_classifier',
class_name=self.meta_classifier.__class__.__name__)
return self
def precompute_X(self, X, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={}):
"""Precompute X, prepare a feature matrix for meta-classifier to make predictions
Parameters
----------
X : array-like
the testing texts, ignored if precomputed_X is not None.
baseline_prediction : array_like, optional
the baseline predictions to be combined with the time series features for the meta-
classifier to make predictions. If the baseline_prediction is given in fit, this
parameter cannot be None, or an error will be raised, otherwise this parameter is
ignored (default is None).
classes : list, optional
the classes used as pandas.DataFrame column names when saving baseline_predictions
made by baseline_classifier. If is None, the value will be retrieved from
self.baseline_classifier.classes_ if exists (default is None).
surfix : str, optional
the surfix to be passed to the buffer when saving data
baseline_clf_predict_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict. Ignored if
baseline_prediction is not None or use_predict_proba is True (default is an
empty dictionary).
baseline_clf_predict_proba_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict_proba.
Ignored if baseline_prediction is not None or use_predict_proba is False
(default is an empty dictionary).
"""
assert not (baseline_prediction is None and self.flags_['baseline_prediction_given_in_fit'] == True), "baseline_prediction cannot be None if it was used in fit."
if 'baseline' in self.feature_groups_:
if baseline_prediction is not None:
if not isinstance(baseline_prediction, pd.DataFrame):
baseline_prediction = pd.DataFrame(data=baseline_prediction)
else:
# compute baseline method
logging.info("Computing baseline method.")
if self.use_predict_proba:
pred = self.baseline_classifier.predict_proba(X, **baseline_clf_predict_proba_kwargs)
baseline_pred_classes = self.get_classes(classes, self.baseline_classifier)
baseline_prediction = pd.DataFrame(data=pred, columns=baseline_pred_classes)
else:
pred = self.baseline_classifier.predict(X, **baseline_clf_predict_kwargs)
baseline_prediction = pd.DataFrame(data=pred, columns=['y'])
self.buffer.save_feature_set(baseline_prediction, method_name='baseline', train_test='test', surfix=surfix)
self.buffer.save_prediction(baseline_prediction, method_name='baseline', train_test='test', surfix=surfix)
# compute time series features
for transformer in self.ts_transformers:
logging.info("Computing {} time series.".format(transformer.name))
# map texts into time series
time_series = transformer.transform(X.values)
# extract and save time series features
ts_features = self.feature_extractor.extract_features(time_series)
self.buffer.save_feature_set(ts_features, method_name=transformer.name, train_test='test', surfix=surfix)
# combine features and select relevant features
X_combined = self.get_combined_features(train_test='test', surfix=surfix)
return X_combined[self.relevant_features_.feature]
def predict(self, X, X_precomputed=None, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={}, meta_clf_predict_kwargs={}):
"""Make predictions on the given testing data set
Parameters
----------
X : array-like
the testing texts, ignored if precomputed_X is not None.
X_precomputed : array_like, optional
the precomputed combined feature set for the meta-classifier to make predictions,
often used when making predictions for the training data set. Besides, when saving
to buffer when X_precomputed is given, "train_test" will not be specified, instead,
surfix can be used to indicate the property (train, test, etc.) of the precomputed X.
baseline_prediction : array_like, optional
the baseline predictions to be combined with the time series features for the meta-
classifier to make predictions. If the baseline_prediction is given in fit, this
parameter cannot be None, or an error will be raised, otherwise this parameter is
ignored. Also ignored if precomputed_X is not None (default of this parameter is
None).
classes : list, optional
the classes used as pandas.DataFrame column names when saving baseline_predictions
            made by baseline_classifier. If it is None, the value will be retrieved from
self.baseline_classifier.classes_ if exists (default is None).
surfix : str, optional
the surfix to be passed to the buffer when saving data
baseline_clf_predict_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict. Ignored if
baseline_prediction is not None or use_predict_proba is True (default is an
empty dictionary).
baseline_clf_predict_proba_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict_proba.
Ignored if baseline_prediction is not None or use_predict_proba is False
(default is an empty dictionary).
meta_clf_predict_kwargs : dict, Optional
the kwargs to be passed to the meta classifier when calling predict (default is an
empty dictionary).
"""
if X_precomputed is not None:
X_relevant = X_precomputed[self.relevant_features_.feature]
pred = self.meta_classifier.predict(X_relevant, **meta_clf_predict_kwargs)
self.buffer.save_prediction(pd.DataFrame(pred, columns=['y']), method_name='meta_classifier', surfix=surfix)
return pred
else:
X_relevant = self.precompute_X(X, baseline_prediction=baseline_prediction,
classes=classes, surfix=surfix,
baseline_clf_predict_kwargs=baseline_clf_predict_kwargs,
baseline_clf_predict_proba_kwargs=baseline_clf_predict_proba_kwargs)
pred = self.meta_classifier.predict(X_relevant, **meta_clf_predict_kwargs)
self.buffer.save_prediction(pd.DataFrame(pred, columns=['y']), method_name='meta_classifier', train_test='test', surfix=surfix)
return pred
def predict_proba(self, X, X_precomputed=None, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={}, meta_clf_predict_proba_kwargs={}):
"""Make probability predictions on the given testing data set
Parameters
----------
X : array-like
the testing texts
X_precomputed : array_like, optional
the precomputed combined feature set for the meta-classifier to make predictions,
often used when making predictions for the training data set. Besides, when saving
to buffer when X_precomputed is given, "train_test" will not be specified, instead,
surfix can be used to indicate the property (train, test, etc.) of the precomputed X.
baseline_prediction : array_like, optional
the baseline predictions to be combined with the time series features for the meta-
classifier to make predictions. If the baseline_prediction is given in fit, this
parameter cannot be None, or an error will be raised, otherwise this parameter is
ignored. Also ignored if precomputed_X is not None (default of this parameter is
None).
classes : list, optional
the classes used as pandas.DataFrame column names when saving baseline_predictions
made by baseline_classifier and predictions made by the meta_classifier. If is None,
the corresponding values will be retrieved from self.baseline_classifier.classes_ and
self.meta_classifier.classes_ if exist (default is None).
surfix : str, optional
the surfix to be passed to the buffer when saving data
baseline_clf_predict_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict. Ignored if
baseline_prediction is not None or use_predict_proba is True (default is an
empty dictionary).
baseline_clf_predict_proba_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict_proba.
Ignored if baseline_prediction is not None or use_predict_proba is False
(default is an empty dictionary).
meta_clf_predict_proba_kwargs : dict, Optional
the kwargs to be passed to the meta classifier when calling predict_proba (default is
an empty dictionary).
"""
# get column names for DataFrame saving meta-classifier predictions
meta_clf_pred_classes = self.get_classes(classes, self.meta_classifier)
# making predictions
if X_precomputed is not None:
X_relevant = X_precomputed[self.relevant_features_.feature]
pred = self.meta_classifier.predict_proba(X_relevant, **meta_clf_predict_proba_kwargs)
self.buffer.save_prediction(pd.DataFrame(pred, columns=meta_clf_pred_classes), method_name='meta_classifier', surfix=surfix)
return pred
else:
X_relevant = self.precompute_X(X, baseline_prediction=baseline_prediction,
classes=classes, surfix=surfix,
baseline_clf_predict_kwargs=baseline_clf_predict_kwargs,
baseline_clf_predict_proba_kwargs=baseline_clf_predict_proba_kwargs)
pred = self.meta_classifier.predict_proba(X_relevant, **meta_clf_predict_proba_kwargs)
self.buffer.save_prediction(pd.DataFrame(pred, columns=meta_clf_pred_classes), method_name='meta_classifier', train_test='test', surfix=surfix)
return pred
class IterativeLanTiSEAA(LanTiSEAA):
"""
    The IterativeLanTiSEAA class implements the framework for the iterative stacking procedure described
    in the paper, where individual time series mapping methods are stacked onto the baseline method
    one by one until no significant improvement can be made. The framework is implemented in the
    fit method of this class.
    The IterativeLanTiSEAA class can also work as a classifier, where the selected combination
will be used to train a final model on the complete data set during fitting, and will be used
in predict and predict_proba for making predictions on new data.
...
Attributes
----------
feature_groups_ : List
a list of names for the feature groups combined together
relevant_features_ : pandas.DataFrame
the relevant features table selected by the TSFeatureExtractor
fold_indices_ : pandas.DataFrame
the fold indices used to split X in fit
Methods
-------
fold_train_test(self, fold, X, y)
Split training and testing data set from X and y using the given fold indices
bayesian_estimation(self, group1, group2, group1_name, group2_name)
        Perform Bayesian estimation of the differences in means, standard deviations and effect size between two sample groups
fit(self, X, y, baseline_prediction=None, classes=None, \
fdr_level_bayesian=0.05, fdr_level_wilcoxon=0.05, \
baseline_clf_fit_kwargs={}, baseline_clf_predict_kwargs={},baseline_clf_predict_proba_kwargs={}, \
meta_clf_fit_kwargs={}, meta_clf_predict_kwargs={}, meta_clf_predict_proba_kwargs={})
Perform the iterative stacking procedure and fit on the complete training data set
get_combined_features(self, train_test='train', surfix=None)
Get features from the methods in feature_groups_ attribute and combine
get_classes(self, classes=None, clf=None)
Get classes from the clf or use the classes parameter. If classes is given,
classes will be returned. Otherwise if clf is None, it will try to retrieve
the classes_ attribute from the meta_classifier. If failed, None will be
returned.
precompute_X(self, X, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={})
Precompute X, prepare a feature matrix for meta-classifier to make predictions
predict(self, X, X_precomputed=None, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={}, meta_clf_predict_kwargs={})
Make predictions on the given testing data set
predict_proba(self, X, X_precomputed=None, baseline_prediction=None, classes=None, surfix=None, \
baseline_clf_predict_kwargs={}, baseline_clf_predict_proba_kwargs={}, meta_clf_predict_proba_kwargs={})
Make probability predictions on the given testing data set
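    Examples
    --------
    A minimal usage sketch (illustrative only; texts and labels are assumed to be pandas
    Series and are not defined in this module). fit runs the iterative stacking procedure
    over the cross-validation folds and then refits the selected combination on the full
    data set before predictions are made on new texts:

        clf = IterativeLanTiSEAA(cv=5, random_state=42)
        clf.fit(texts, labels)
        proba = clf.predict_proba(new_texts)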
"""
def __init__(self, \
ts_transformers=[TokenLenSeqTransformer(), TokenFreqSeqTransformer(), TokenRankSeqTransformer(), TokenLenDistTransformer(), TokenRankDistTransformer()], \
feature_extractor=TsfreshTSFeatureExtractor(), baseline_classifier=BOWMNB(), \
meta_classifier=GradientBoostingClassifier(), cv=None, \
metric=log_loss, greater_is_better=False, \
use_predict_proba=True, \
buffer=MemoryBuffer(), random_state=None):
"""Construct new LanTiSEAA object
Parameters
----------
ts_transformers : List, optional
a list of ts_transformers. The ts_transformers will be used to map the texts and time series
features will be extracted from each type of the time series and then been combined and selected
(in default the list is five transformers - TokenLenSeqTransformer, TokenFreqSeqTransformer,
TokenRankSeqTransformer, TokenLenDistTransformer, and TokenRankDistTransformer - all under
lantiseaa.ts package).
feature_extractor : lantiseaa.extractor.BaseTSFeatureExtractor, optional
the time series feature extractor for extracting and selecting relevant features (default is
lantiseaa.extractor.TsfreshTSFeatureExtractor).
baseline_classifier : Classifier, optional
the baseline method for making baseline predictions. Ignored in fit, predict and
            predict_proba if baseline_prediction is given in fit. If it is None and baseline_prediction
is not given, baseline will be ignored and only time series features will be used (default
is lantiseaa.nlp.BOWMNB).
meta_classifier : Classifier, optional
the meta-classifier to be trained on combined features and make predictions (default is
GradientBoostingClassifier from sklearn. Note this is different from the XGBClassifier by XGBoost
we used in the paper due to a generalizability issue with XGBClassifier - the number of classes
should be defined when initializing the object. For better performance, pass in XGBClassifier
instead of GradientBoostingClassifier instead).
cv : int, cross-validation generator or an iterable, optional
the cv splitting strategy used for setting up the cvs for the iterative framework. Values can
be chosen from:
* None, to use the default 10-fold cross validation generating by a StratifiedKFold,
* integer, to specify the number of folds in a StratifiedKFold, cannot be less than 5 to allow accurate statistical tests
* cross-validation generator
When None or an integer is passed in, a StratifiedKFold will be used to split the folds and
the random_state parameter will be used to specify the random seed for the StratifiedKFold.
use_predict_proba : boolean, optional
use predict_proba for the baseline method, meta classifier, and metric. It will be applied to all
three. Note that if use_predict_proba is set to False, you need to encode y labels into integers
for the meta-classifier to work, if the meta-classifier only takes integer features (default is True)
metric : sklearn.metric, optional
the metric used to score the predictions (default is log_loss)
greater_is_better : boolean, optional
property of metric - where a greater score is a better score (default is False)
buffer : lantiseaa.buffer.BaseBuffer, optional
the buffer used to store the data generated during computation (default is MemoryBuffer).
random_state : int, optional
the random seed to be used when constructing cv_folds, ignored if the cv parameter given is a
cross-validation generator or an iterable (default is None).
"""
self.ts_transformers = ts_transformers
self.feature_extractor = feature_extractor
self.baseline_classifier = baseline_classifier
self.meta_classifier = meta_classifier
self.metric = metric
self.greater_is_better = greater_is_better
self.use_predict_proba = use_predict_proba
self.buffer = buffer
self.random_state = random_state
self.flags_ = {'baseline_prediction_given_in_fit': False}
if cv is None:
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=self.random_state)
elif isinstance(cv, int):
assert (cv >= 5), "Number of folds cannot be less than 5 to allow accurate statistical tests."
cv = StratifiedKFold(n_splits=cv, shuffle=True, random_state=self.random_state)
self.cv = cv
self.dataset_independent_ts_ = []
for transformer in self.ts_transformers:
if isinstance(transformer, BaseDatasetIndependentTSTransformer):
self.dataset_independent_ts_.append(transformer.name)
def fold_train_test(self, fold, X, y):
'''Split training and testing data set from X and y using the given fold indices
Parameters
----------
fold : tuple
the fold indices for splitting train and test sets. fold is a tuple of two elements,
fold[0] is the fold number of the fold under the k-folds, and fold[1] is a row in
a pandas DataFrame, containing two columns 'train' and 'test'. Under 'train' are the
indices for the training set under this fold and under 'test' are the indices for the
testing set under this fold.
X : array-like
the data to be split
y : array-like
the target to be split
'''
# split train test
train_indices = fold[1].train
test_indices = fold[1].test
train = X[train_indices].reset_index(drop=True)
test = X[test_indices].reset_index(drop=True)
y_train = y[train_indices].reset_index(drop=True)
y_test = y[test_indices].reset_index(drop=True)
return train, test, y_train, y_test
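    # Illustrative use of fold_train_test (assumed setup, not part of the original code): with
    # the fold indices stored in a DataFrame holding 'train' and 'test' columns of index arrays
    # (see the fold_indices_ attribute), iterating with iterrows() yields the
    # (fold_number, row) tuples this method expects:
    #
    #     for fold in self.fold_indices_.iterrows():
    #         train, test, y_train, y_test = self.fold_train_test(fold, X, y)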
def bayesian_estimation(self, group1, group2, group1_name, group2_name):
        '''Perform Bayesian estimation of the differences in means, standard deviations and effect size between two sample groups
'''
y1 = np.array(group1)
y2 = np.array(group2)
y = pd.DataFrame(dict(value=np.r_[y1, y2], group=np.r_[[group1_name]*len(y1), [group2_name]*len(y2)]))
μ_m = y.value.mean()
μ_s = y.value.std() * 2
with pm.Model() as model:
group1_mean = pm.Normal('group1_mean', μ_m, sd=μ_s)
group2_mean = pm.Normal('group2_mean', μ_m, sd=μ_s)
# according to https://docs.pymc.io/notebooks/BEST.html, instead of a very wide uniform prior to the standard deviation,
# "apply as much prior information that you have available to the parameterization of prior distributions" would be better.
# the std for our data are usually very small, so let's set the group standard deviation to have a Uniform(0.01-0.1)
σ_low = 0.01
σ_high = 0.1
with model:
group1_std = pm.Uniform('group1_std', lower=σ_low, upper=σ_high)
group2_std = pm.Uniform('group2_std', lower=σ_low, upper=σ_high)
with model:
ν = pm.Exponential('ν_minus_one', 1/29.) + 1
with model:
λ1 = group1_std**-2
λ2 = group2_std**-2
group1 = pm.StudentT(group1_name, nu=ν, mu=group1_mean, lam=λ1, observed=y1)
group2 = pm.StudentT(group2_name, nu=ν, mu=group2_mean, lam=λ2, observed=y2)
with model:
diff_of_means = pm.Deterministic('difference of means', group1_mean - group2_mean)
diff_of_stds = pm.Deterministic('difference of stds', group1_std - group2_std)
effect_size = pm.Deterministic('effect size', diff_of_means / np.sqrt((group1_std**2 + group2_std**2) / 2))
with model:
trace = pm.sample(2000, cores=None)
return trace
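    # Illustrative follow-up on the returned trace (assumed usage, not part of the original
    # API): summarising the posterior of 'difference of means' and 'effect size' is one way
    # to judge whether stacking an additional time series method improved the metric:
    #
    #     trace = self.bayesian_estimation(baseline_scores, stacked_scores, 'baseline', 'stacked')
    #     print(pm.summary(trace))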
def fit(self, X, y, baseline_prediction=None, classes=None, \
fdr_level_bayesian=0.05, fdr_level_wilcoxon=0.05, \
baseline_clf_fit_kwargs={}, baseline_clf_predict_kwargs={},baseline_clf_predict_proba_kwargs={}, \
meta_clf_fit_kwargs={}, meta_clf_predict_kwargs={}, meta_clf_predict_proba_kwargs={}):
"""Perform the iterative stacking procedure and fit on the complete training data set
        Unlike LanTiSEAA, the baseline_prediction parameter is not allowed here, as the data set
        will be split into multiple folds during the iterative computation. To get the most realistic
        measurement of how the time series features improve the baseline method, the baseline
        method needs to be fit on the training data and predict on the testing data under each
        fold. A pre-defined baseline_prediction would lead to biased measurements and a biased
        selection of the time series methods.
Parameters
----------
X : array-like
the training texts
y : array-like
the true target values
(deprecated) baseline_prediction : array-like, optional
the baseline predictions to be combined with the time series features to train the
meta-classifier. If is None, the non-None baseline_classifier will be used to make
baseline predictions (default is None).
It is deprecated here as the data set will be split into multiple folds during iterative
computation. To get the most realistic measurements in how the time series features will
            improve the baseline method, the baseline method needs to be fit on training data and
predict on testing data under each fold. A pre-defined baseline_prediction will lead to
biased measurements and selection of the time series methods.
classes : list, optional
the classes used as pandas.DataFrame column names when saving baseline_predictions
made by baseline_classifier and predictions made by the meta_classifier. If is None,
the corresponding values will be retrieved from self.baseline_classifier.classes_ and
self.meta_classifier.classes_ if exist (default is None).
fdr_level_bayesian : float, optional
the expected false estimation rate for bayesian estimation (default is 0.05)
fdr_level_wilcoxon : float, optional
the expected false estimation rate for Wilcoxon Signed Rank Test (default is 0.05)
baseline_clf_fit_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling fit. Ignored if
baseline_prediction is not None (default is an empty dictionary).
baseline_clf_predict_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict. Ignored if
baseline_prediction is not None or use_predict_proba is True (default is an
empty dictionary).
baseline_clf_predict_proba_kwargs : dict, Optional
the kwargs to be passed to the baseline classifier when calling predict_proba.
Ignored if baseline_prediction is not None or use_predict_proba is False
(default is an empty dictionary).
meta_clf_fit_kwargs : dict, Optional
the kwargs to be passed to the meta classifier when calling fit (default is an empty
dictionary).
meta_clf_predict_kwargs : dict, Optional
the kwargs to be passed to the meta classifier when calling predict. Ignored if
use_predict_proba is True (default is an empty dictionary).
meta_clf_predict_proba_kwargs : dict, Optional
the kwargs to be passed to the meta classifier when calling predict_proba.
Ignored if use_predict_proba is False (default is an empty dictionary).
"""
self.feature_groups_ = []
if (baseline_prediction is not None) or (self.baseline_classifier is not None):
self.feature_groups_.append('baseline')
# if baseline_prediction is given, ignore baseline_classifier
if baseline_prediction is not None:
self.flags_['baseline_prediction_given_in_fit'] = True
if not isinstance(baseline_prediction, pd.DataFrame):
baseline_prediction = | pd.DataFrame(data=baseline_prediction) | pandas.DataFrame |
import os
import math
import copy
import random
import calendar
import csv
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import sqlite3
import seaborn as sns
#from atnresilience import atn_analysis as atn
import atn_analysis
import db_tools
# Set global styles for plots
plt.rcParams["font.family"] = "Times New Roman"
sns.set_palette("colorblind")
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
line_type = {1:'-',2:'--',3:':',4:'-.'}
def remove_frequency(db_path, file, airline, include_data, can_limit, zs_limit, processed_direc):
"""
Creates a dictionary of airports and their removal frequency for a given airline
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
include_data: string
Type of airline data to query from csv
can_limit: int
Cancellation limit
zs_limit: int
The z-score limit
Returns
-------
Returns a dictionary containing airport removal frequency values
Notes
-----
"""
df_net_tuple = pd.DataFrame()
df_net = atn_analysis.raw_query(db_path, file, airline)
df_net_tuple["Origin"] = df_net.Origin_Airport_Code
df_net_tuple["Destination"] = df_net.Destination_Airport_Code
graph = [tuple(x) for x in df_net_tuple.to_records(index=False)]
G = nx.Graph()
G.add_edges_from(graph)
tempG = G.copy()
Airport_Dict = {}
for i in G.nodes():
Airport_Dict[i] = 0
Total_List = get_remove_list(db_path, file,include_data, airline, can_limit, zs_limit, processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
airport_list = Total_List[j]
for l in airport_list:
tempG.remove_node(l)
Airport_Dict[l] = Airport_Dict[l] + 1
tempG = G.copy()
return(Airport_Dict)
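# Illustrative call (hedged; the argument values below are placeholders, not values from this project):
#   freq = remove_frequency('atn.db', 2015, 'DL', 'CC', 5, 3.0, 'processed/')
#   top10 = sorted(freq.items(), key=lambda kv: kv[1], reverse=True)[:10]  # most frequently removed airports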
def weighted_edge(db_path, file, airline):
"""
Creates a data frame of origin airports, destination airports and weights for each route
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
include_data: string
Type of airline data to query from csv
can_limit: int
Cancellation limit
zs_limit: int
The z-score limit
Returns
-------
Returns a data frame containing each respective weighted route from an origin airport to a destination
Notes
-----
"""
df = atn_analysis.raw_query(db_path, file, airline)
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
df_tuple = | pd.DataFrame() | pandas.DataFrame |
#code will get the proper values like emyield, marketcap, cacl, etc, and supply a string and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
from scipy import stats
import math
class quantvaluedata: #just contains functions, will NEVER actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=df['yearquarter'].iloc[-1]+i+1#because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
logging.error("to many rows in df")
exit()
pass
value=df[key].iloc[0]
if pd.isnull(value):
return None
return float(value)
def get_sum_quarters(self,df,key,seed,length):
values=[]
        #BIG BUG: this was originally -length-1, which was always truncating the array and producing NaNs.
periods=range(seed,seed-length,-1)
for p in periods:
values.append(self.get_value(df,key,p))
#logging.info('values:'+str(values))
if pd.isnull(values).any(): #return None if any of the values are None
return None
else:
return float(np.sum(values))
def get_market_cap(self,statements_df,prices_df,seed=-1):
total_shares=self.get_value(statements_df,'weightedavedilutedsharesos',seed)
if pd.isnull(total_shares):
return None
end_date=statements_df['end_date'].iloc[seed]
if seed==-1: #get the latest price but see if there was a split between the end date and now
s=pd.to_datetime(prices_df['date'])>pd.to_datetime(end_date)
tempfd=prices_df[s]
splits=tempfd['split_ratio'].unique()
adj=pd.Series(splits).product() #multiply all the splits together to get the total adjustment factor from the last total_shares
total_shares=total_shares*adj
last_price=prices_df.sort_values('date').iloc[-1]['close']
price=float(last_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.get_value(statements_df,'marketcap',seed)
if pd.isnull(marketcap):
return None
else:
return marketcap
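    # Worked example with illustrative numbers: if the last reported diluted share count is 100
    # and a 2-for-1 then a 3-for-1 split occurred after that report's end date, the product of
    # the split ratios is 2 * 3 = 6, so 600 adjusted shares are multiplied by the latest close.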
def get_netdebt(self,statements_df,seed=-1):
shorttermdebt=self.get_value(statements_df,'shorttermdebt',seed)
longtermdebt=self.get_value(statements_df,'longtermdebt',seed)
capitalleaseobligations=self.get_value(statements_df,'capitalleaseobligations',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
restrictedcash=self.get_value(statements_df,'restrictedcash',seed)
fedfundssold=self.get_value(statements_df,'fedfundssold',seed)
interestbearingdepositsatotherbanks=self.get_value(statements_df,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.get_value(statements_df,'timedepositsplaced',seed)
s=pd.Series([shorttermdebt,longtermdebt,capitalleaseobligations,cashandequivalents,restrictedcash,fedfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).astype('float')
if pd.isnull(s).all(): #return None if everything is null
return None
        # one multiplier per component of s (debt items +1, cash-like items -1); the original
        # series was one element short, which silently dropped timedepositsplaced from the sum
        m=pd.Series([1,1,1,-1,-1,-1,-1,-1])
netdebt=s.multiply(m).sum()
return float(netdebt)
def get_enterprise_value(self,statements_df,prices_df,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.get_market_cap(statements_df,prices_df,seed)
netdebt=self.get_netdebt(statements_df,seed)
totalpreferredequity=self.get_value(statements_df,'totalpreferredequity',seed)
noncontrollinginterests=self.get_value(statements_df,'noncontrollinginterests',seed)
redeemablenoncontrollinginterest=self.get_value(statements_df,'redeemablenoncontrollinginterest',seed)
s=pd.Series([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablenoncontrollinginterest])
if pd.isnull(s).all() or pd.isnull(marketcap):
return None
return float(s.sum())
def get_ebit(self,df,seed=-1,length=4):
ebit=self.get_sum_quarters(df,'totaloperatingincome',seed,length)
if pd.notnull(ebit):
return float(ebit)
totalrevenue=self.get_sum_quarters(df,'totalrevenue',seed,length)
provisionforcreditlosses=self.get_sum_quarters(df,'provisionforcreditlosses',seed,length)
totaloperatingexpenses=self.get_sum_quarters(df,'totaloperatingexpenses',seed,length)
s=pd.Series([totalrevenue,provisionforcreditlosses,totaloperatingexpenses])
if pd.isnull(s).all():
return None
ebit=(s.multiply(pd.Series([1,-1,-1]))).sum()
if pd.notnull(ebit):
return float(ebit)
return None
def get_emyield(self,statements_df,prices_df,seed=-1,length=4):
ebit=self.get_ebit(statements_df,seed,length)
enterprisevalue=self.get_enterprise_value(statements_df,prices_df,seed)
if pd.isnull([ebit,enterprisevalue]).any() or enterprisevalue==0:
return None
return float(ebit/enterprisevalue)
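    # Worked example with illustrative numbers: trailing EBIT of 80 against an enterprise value
    # of 1000 gives an EBIT/EV "earnings yield" of 0.08; higher values screen as cheaper.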
def get_scalednetoperatingassets(self,statements_df,seed=-1):
"""
        SNOA = (Operating Assets - Operating Liabilities) / Total Assets
        where
        OA = total assets - cash and equivalents
        OL = total assets - ST debt - LT debt - minority interest - preferred stock - book common
oa=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['cashandequivalents']
ol=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['netdebt']-ttmsdfcompany.iloc[-1]['totalequityandnoncontrollinginterests']
snoa=(oa-ol)/ttmsdfcompany.iloc[-1]['totalassets']
"""
totalassets=self.get_value(statements_df,'totalassets',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
netdebt=self.get_netdebt(statements_df,seed)
totalequityandnoncontrollinginterests=self.get_value(statements_df,'totalequityandnoncontrollinginterests',seed)
if pd.isnull(totalassets) or totalassets==0:
return None
s=pd.Series([totalassets,cashandequivalents])
m=pd.Series([1,-1])
oa=s.multiply(m).sum()
s=pd.Series([totalassets,netdebt,totalequityandnoncontrollinginterests])
m=pd.Series([1,-1,-1])
ol=s.multiply(m).sum()
scalednetoperatingassets=(oa-ol)/totalassets
return float(scalednetoperatingassets)
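    # Worked example with illustrative numbers: total assets 1000 and cash 100 give OA = 900;
    # net debt 200 and total equity (incl. non-controlling interests) 500 give OL = 1000 - 200 - 500 = 300;
    # SNOA = (900 - 300) / 1000 = 0.6.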
def get_scaledtotalaccruals(self,statements_df,seed=-1,length=4):
netincome=self.get_sum_quarters(statements_df,'netincome',seed,length)
netcashfromoperatingactivities=self.get_sum_quarters(statements_df,'netcashfromoperatingactivities',seed,length)
start_assets=self.get_value(statements_df,'cashandequivalents',seed-length)
end_assets=self.get_value(statements_df,'cashandequivalents',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=np.mean([start_assets,end_assets])
if pd.isnull(totalassets):
return None
num=pd.Series([netincome,netcashfromoperatingactivities])
if pd.isnull(num).all():
return None
m=pd.Series([1,-1])
num=num.multiply(m).sum()
den=totalassets
if den==0:
return None
scaledtotalaccruals=num/den
return float(scaledtotalaccruals)
def get_grossmargin(self,statements_df,seed=-1,length=4):
totalrevenue=self.get_sum_quarters(statements_df, 'totalrevenue', seed, length)
totalcostofrevenue=self.get_sum_quarters(statements_df, 'totalcostofrevenue', seed, length)
if pd.isnull([totalrevenue,totalcostofrevenue]).any() or totalcostofrevenue==0:
return None
grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue
return float(grossmargin)
def get_margingrowth(self,statements_df,seed=-1,length1=20,length2=4):
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any():
return None
growth=grossmargins.pct_change(periods=1)
growth=growth[pd.notnull(growth)]
if len(growth)==0:
return None
grossmargingrowth=stats.gmean(1+growth)-1
if pd.isnull(grossmargingrowth):
return None
return float(grossmargingrowth)
def get_marginstability(self,statements_df,seed=-1,length1=20,length2=4):
#length1=how far back to go, how many quarters to get 20 quarters
#length2=for each quarter, how far back to go 4 quarters
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any() or grossmargins.std()==0:
return None
marginstability=grossmargins.mean()/grossmargins.std()
if pd.isnull(marginstability):
return None
return float(marginstability)
def get_cacl(self,df,seed=-1):
a=self.get_value(df,'totalcurrentassets',seed)
l=self.get_value(df,'totalcurrentliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_tatl(self,df,seed=-1):
a=self.get_value(df,'totalassets',seed)
l=self.get_value(df,'totalliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_longterm_cacl(self,df,seed=-1,length=20):
ltcacls=[]
for i in range(seed,seed-length,-1):
ltcacls.append(self.get_cacl(df,i))
ltcacls= | pd.Series(ltcacls) | pandas.Series |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pytest
from toolz.dicttoolz import valmap
from kartothek.core.factory import DatasetFactory
from kartothek.io.eager import store_dataframes_as_dataset
def assert_index_dct_equal(dict1, dict2):
dict1 = valmap(sorted, dict1)
dict2 = valmap(sorted, dict2)
assert dict1 == dict2
def test_build_indices(store_factory, metadata_version, bound_build_dataset_indices):
dataset_uuid = "dataset_uuid"
partitions = [
| pd.DataFrame({"p": [1, 2]}) | pandas.DataFrame |
# Created by fw at 8/14/20
import torch
import numpy as np
import pandas as pd
import joblib
from torch.utils.data import Dataset as _Dataset
# from typing import Union,List
import lmdb
import io
import os
def get_dataset(cfg, city, dataset_type):
cfg = cfg.DATASET
assert city.upper() in ["BERLIN", "ISTANBUL", "MOSCOW", "ALL"], "wrong city"
Dataset: object = globals()[cfg.NAME]
if city.upper() == "ALL":
d = []
for c in ["BERLIN", "ISTANBUL", "MOSCOW"]:
d.append(Dataset(cfg, c, dataset_type))
dataset = torch.utils.data.ConcatDataset(d)
else:
dataset = Dataset(cfg, city, dataset_type)
return dataset
# 2019-01-01 TUESDAY
def _get_weekday_feats(index):
dayofyear = index // 288 + 1
weekday = np.zeros([7, 495, 436], dtype=np.float32)
weekday[(dayofyear + 1) % 7] = 1
return weekday
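# Worked example: index 0 falls on 2019-01-01 (day-of-year 1, a Tuesday), so (1 + 1) % 7 = 2 and
# channel 2 is set to 1; by the same arithmetic, channel 0 corresponds to Sundays.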
def _get_time_feats(index):
index = index % 288
theta = index / 287 * 2 * np.pi
time = np.zeros([2, 495, 436], dtype=np.float32)
time[0] = np.cos(theta)
time[1] = np.sin(theta)
return time
# map to [0,255]
def _get_weekday_feats_v2(index) -> np.array:
dayofyear = index // 288 + 1
weekday = np.zeros([7, 495, 436], dtype=np.float32)
weekday[(dayofyear + 1) % 7] = 255
return weekday
# map to [0,255]
def _get_time_feats_v2(index) -> np.array:
index = index % 288
theta = index / 287 * 2 * np.pi
time = np.zeros([2, 495, 436], dtype=np.float32)
time[0] = (np.cos(theta) + 1) / 2 * 255
time[1] = (np.sin(theta) + 1) / 2 * 255
return time
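# Worked example: at midnight (index % 288 == 0) theta = 0, so channel 0 = (cos 0 + 1) / 2 * 255 = 255
# and channel 1 = (sin 0 + 1) / 2 * 255 = 127.5; the cyclic cos/sin pair keeps 23:55 and 00:00 close
# in feature space, unlike a raw time-of-day index.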
class PretrainDataset(_Dataset):
def __init__(self, cfg, city="berlin", dataset_type="train"):
self.city = city.upper()
self.cfg = cfg
self.dataset_type = dataset_type
self.sample = self._sample(dataset_type)
self.env = None
self.transform_env = None
# TODO
def __len__(self):
return len(self.sample)
def _sample(self, dataset_type):
assert dataset_type in ["train", "valid"], "wrong dataset type"
if dataset_type == "train":
return range(105120)
if dataset_type == "valid":
return np.random.choice(range(105120), 1024)
# TODO
def __getitem__(self, idx):
if self.env is None:
self.env = lmdb.open(
os.path.join(self.cfg.DATA_PATH, self.city), readonly=True
)
# print(idx)
start_idx = self.sample[idx]
x = [self._get_item(start_idx + i) for i in range(12)]
x = np.concatenate(x)
y = [self._get_item(start_idx + i) for i in [12, 13, 14, 17, 20, 23]]
y = np.concatenate(y)
extra = np.concatenate(
[_get_time_feats_v2(start_idx), _get_weekday_feats_v2(start_idx)]
)
return {"x": x, "y": y, "extra": extra}
def _get_item(self, idx):
idx = str(idx).encode("ascii")
try:
with self.env.begin() as txn:
data = txn.get(idx)
data = np.load(io.BytesIO(data))
x = np.zeros(495 * 436 * 3, dtype=np.uint8)
x[data["x"]] = data["y"]
x = x.reshape([495, 436, 3])
x = np.moveaxis(x, -1, 0)
except:
x = np.zeros([3, 495, 436], dtype=np.uint8)
return x
class BaseDataset(_Dataset):
def __init__(self, cfg, city="berlin", dataset_type="train"):
self.city = city.upper()
self.cfg = cfg
self.dataset_type = dataset_type
self.sample = self._sample(dataset_type)
self.env = None
self.transform_env = None
# TODO
def __len__(self):
return len(self.sample)
def _sample(self, dataset_type):
assert dataset_type in ["train", "valid", "test"], "wrong dataset type"
self.valid_index = np.load(self.cfg.VALID_INDEX)["index"]
self.test_index = np.load(self.cfg.TEST_INDEX)["index"]
self.valid_and_text_index = np.append(self.test_index, self.valid_index)
self.valid_and_text_index.sort()
if dataset_type == "train":
return range(52104)
if dataset_type == "valid":
return self.valid_index
if dataset_type == "test":
return self.test_index
# TODO
def __getitem__(self, idx):
if self.env is None:
self.env = lmdb.open(
os.path.join(self.cfg.DATA_PATH, self.city), readonly=True
)
# print(idx)
start_idx = self.sample[idx]
x = [self._get_item(start_idx + i) for i in range(12)]
x = np.concatenate(x)
if self.dataset_type != "test":
y = [self._get_item(start_idx + i)[:-1] for i in [12, 13, 14, 17, 20, 23]]
y = np.concatenate(y)
return {"x": x, "y": y}
else:
return {"x": x}
def _get_item(self, idx):
idx = str(idx).encode("ascii")
try:
with self.env.begin() as txn:
data = txn.get(idx)
data = np.load(io.BytesIO(data))
x = np.zeros(495 * 436 * 9, dtype=np.uint8)
x[data["x"]] = data["y"]
x = x.reshape([495, 436, 9])
x = np.moveaxis(x, -1, 0)
except:
x = np.zeros([9, 495, 436], dtype=np.uint8)
return x
def sample_by_month(self, month):
if type(month) is int:
month = [month]
sample = []
one_day = | pd.to_datetime("2019-01-02") | pandas.to_datetime |
import pandas as pd
from collections import OrderedDict
import csv
print("Started.")
# define paths to your existing zooniverse classifications and metadata locations
# These are the files you get as output from the Jupyter Notebook convert_zooniverse.ipynb
classifs=pd.read_csv("../zooniverse_classifications/zooniverse_data_all_final.csv")
metadata=pd.read_csv("../metadata/metadata_all_PU.csv",sep=',')
filename_links= | pd.read_csv("../metadata/filename_links.csv") | pandas.read_csv |
#!/usr/bin/env python
import numpy as np
import pandas as pa
import pb
from glccIndex import *
from collections import OrderedDict
def calc_area_from_LCCmatrix(veg5ini,list_LCCmatrix,area):
"""
Caculate the time series of area from initial vegetation
array (veg5ini) and a list of LCCmatrix.
Parameters:
-----------
veg5ini: Initial vegetation fraction array, in the sequnce
of baresoil, forest, grass, pasture, crop.
list_LCCmatrix: list of LCC matrix, each matrix having
12 as the length of first dimension. In the sequence
of:
f2g=1-1; f2p=2-1; f2c=3-1;
g2f=4-1; g2p=5-1; g2c=6-1;
p2f=7-1; p2g=8-1; p2c=9-1;
c2f=10-1; c2g=11-1; c2p=12-1
area: area used to calcuate from the fraction to absolute area
"""
#Initial arrays of veg fraction
veget_1500 = veg5ini.copy()
forest = veget_1500[1]
grass = veget_1500[2]
pasture = veget_1500[3]
crop = veget_1500[4]
list_forest = []
list_grass = []
list_pasture = []
list_crop = []
list_forest.append(forest)
list_grass.append(grass)
list_pasture.append(pasture)
list_crop.append(crop)
#indices
f2g=1-1; f2p=2-1; f2c=3-1; g2f=4-1; g2p=5-1; g2c=6-1; p2f=7-1; p2g=8-1; p2c=9-1; c2f=10-1; c2g=11-1; c2p=12-1
for ind in range(len(list_LCCmatrix)):
arr = list_LCCmatrix[ind]
forest_new = forest - arr[f2g] - arr[f2p] - arr[f2c] + arr[g2f] + arr[p2f] + arr[c2f]
grass_new = grass - arr[g2f] - arr[g2p] - arr[g2c] + arr[f2g] + arr[p2g] + arr[c2g]
pasture_new = pasture - arr[p2f] - arr[p2g] - arr[p2c] + arr[f2p] + arr[g2p] + arr[c2p]
crop_new = crop - arr[c2f] - arr[c2p] - arr[c2g] + arr[f2c] + arr[p2c] + arr[g2c]
list_forest.append(forest_new)
list_grass.append(grass_new)
list_pasture.append(pasture_new)
list_crop.append(crop_new)
forest = forest_new.copy()
grass = grass_new.copy()
pasture = pasture_new.copy()
crop = crop_new.copy()
baresoil = np.tile(veget_1500[0],(len(list_crop),1,1))
    # materialise the map into a list so it can be concatenated with [baresoil] below
    # (under Python 3, map returns an iterator)
    series_arr = list(map(lambda list_arr:np.rollaxis(np.ma.dstack(list_arr),2,0),[list_forest,list_grass,list_pasture,list_crop]))
veg5type = dict(zip(['bareland', 'forest', 'grass', 'pasture', 'crop'],[baresoil] + series_arr))
dic = pb.Dic_Apply_Func(lambda x:np.ma.sum(x*area,axis=(1,2)),veg5type)
dft = pa.DataFrame(dic)
return dft
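# Illustrative usage sketch (hedged; the variable names below are placeholders, not defined in this module):
#   dft = calc_area_from_LCCmatrix(veg5_1500, lcc_matrices_by_year, grid_cell_area)
#   dft[['forest', 'crop']].plot()   # time series of absolute forest and cropland area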
glccmatrix_string = ['f2g','f2p','f2c','g2f','g2p','g2c','p2f','p2g','p2c','c2f','c2g','c2p']
f2g=1-1; f2p=2-1; f2c=3 -1; g2f=4-1; g2p=5-1; g2c=6-1; p2f=7-1; p2g=8-1; p2c=9-1; c2f=10-1; c2g=11-1; c2p=12-1
def lccm_SinglePoint_to_4X4matrix(lcc_matrix):
"""
Convert the 12-length lcc_matrix to 4x4 matrix, return a dataframe:
Column = recieving; Row = giving
f g p c
f - - f2p f2c
g g2f - g2p g2c
p p2f p2g - p2c
c c2f c2g c2p -
"""
f2g=1-1; f2p=2-1; f2c=3 -1; g2f=4-1; g2p=5-1; g2c=6-1; p2f=7-1; p2g=8-1; p2c=9-1; c2f=10-1; c2g=11-1; c2p=12-1
arr = np.zeros((4,4))
arr[0,1] = lcc_matrix[f2g]
arr[0,2] = lcc_matrix[f2p]
arr[0,3] = lcc_matrix[f2c]
#arr[0,0] = 1-arr[0,1:].sum()
arr[1,0] = lcc_matrix[g2f]
arr[1,2] = lcc_matrix[g2p]
arr[1,3] = lcc_matrix[g2c]
#arr[1,1] = 1-lcc_matrix[[g2f,g2p,g2c]].sum()
arr[2,0] = lcc_matrix[p2f]
arr[2,1] = lcc_matrix[p2g]
arr[2,3] = lcc_matrix[p2c]
#arr[2,2] = 1-lcc_matrix[[p2f,p2g,p2c]].sum()
arr[3,0] = lcc_matrix[c2f]
arr[3,1] = lcc_matrix[c2g]
arr[3,2] = lcc_matrix[c2p]
#arr[3,3] = 1-lcc_matrix[[c2f,c2g,c2p]].sum()
dft = pa.DataFrame(arr,columns=['f','g','p','c'],index=['f','g','p','c'])
return dft
def vegmax_SinglePoint_to_veg4type(vegmax):
"""
Convert a single-point 65-length veget_max into fractions of
[f,g,p,c]
"""
f = vegmax[ia2_1:ia9_5+1].sum()
g = vegmax[ia10_1:ia10_4+1].sum() + vegmax[ia12_1:ia12_4+1].sum()
p = vegmax[ia11_1:ia11_4+1].sum() + vegmax[ia13_1:ia13_4+1].sum()
c = vegmax[ia14_1:ia15_4+1].sum()
return np.array([f,g,p,c])
def glccpftmtc_SinglePoint_to_4X4matrix(glcc_pftmtc):
"""
Convert the glcc_pftmtc(65x15) to 4x4 matrix, in a dataframe:
Column = recieving; Row = giving
f g p c
f - - f2p f2c
g g2f - g2p g2c
p p2f p2g - p2c
c c2f c2g c2p -
"""
glccReal_from_glccpftmtc = np.zeros((4,4))
glccReal_from_glccpftmtc[0,0] = glcc_pftmtc[ia2_1:ia9_5+1,1:9].sum()
glccReal_from_glccpftmtc[0,1] = glcc_pftmtc[ia2_1:ia9_5+1,[9,11]].sum()
glccReal_from_glccpftmtc[0,2] = glcc_pftmtc[ia2_1:ia9_5+1,[10,12]].sum()
glccReal_from_glccpftmtc[0,3] = glcc_pftmtc[ia2_1:ia9_5+1,[13,14]].sum()
glccReal_from_glccpftmtc[1,0] = glcc_pftmtc[ia10_1:ia10_4+1,1:9].sum() + glcc_pftmtc[ia12_1:ia12_4+1,1:9].sum()
glccReal_from_glccpftmtc[1,2] = glcc_pftmtc[ia10_1:ia10_4+1,[10,12]].sum() + glcc_pftmtc[ia12_1:ia12_4+1,[10,12]].sum()
glccReal_from_glccpftmtc[1,3] = glcc_pftmtc[ia10_1:ia10_4+1,[13,14]].sum() + glcc_pftmtc[ia12_1:ia12_4+1,[13,14]].sum()
glccReal_from_glccpftmtc[2,0] = glcc_pftmtc[ia11_1:ia11_4+1,1:9].sum() + glcc_pftmtc[ia13_1:ia13_4+1,1:9].sum()
glccReal_from_glccpftmtc[2,1] = glcc_pftmtc[ia11_1:ia11_4+1,[9,11]].sum() + glcc_pftmtc[ia13_1:ia13_4+1,[9,11]].sum()
glccReal_from_glccpftmtc[2,3] = glcc_pftmtc[ia11_1:ia11_4+1,[13,14]].sum() + glcc_pftmtc[ia13_1:ia13_4+1,[13,14]].sum()
glccReal_from_glccpftmtc[3,0] = glcc_pftmtc[ia14_1:ia15_4+1,1:9].sum()
glccReal_from_glccpftmtc[3,1] = glcc_pftmtc[ia14_1:ia15_4+1,[9,11]].sum()
glccReal_from_glccpftmtc[3,2] = glcc_pftmtc[ia14_1:ia15_4+1,[10,12]].sum()
dft = | pa.DataFrame(glccReal_from_glccpftmtc,columns=['f','g','p','c'],index=['f','g','p','c']) | pandas.DataFrame |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import geopandas as gpd
import numpy as np
# for debugging purposes
import json
external_stylesheets = ['stylesheet.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
h_max = 550
margin_val = 30
df = pd.read_csv("data/data.csv")
feature_names = df.drop(['neighborhood code','neighborhood name',
'district name'], axis=1).head()
# relative path; ensure that the present script contains the data subdirectory
data_path = "data/barris.geojson"
gdf = gpd.read_file(data_path)
gdf.rename(columns={"BARRI": "neighborhood code"}, inplace=True)
gdf["neighborhood code"] = gdf["neighborhood code"].apply(int)
gdf["nbd code"] = gdf["neighborhood code"]
df_merged = | pd.merge(gdf, df, on="neighborhood code") | pandas.merge |
'''
@Author: your name
@Date: 2020-05-25 10:31:42
@LastEditTime: 2020-05-25 11:03:34
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /dataprocess/transform/colconcat-neu.py
'''
import pandas as pd
import argparse
import os
import shutil
import random
parser = argparse.ArgumentParser()
parser.add_argument("--left_data_dir", type=str)
parser.add_argument("--right_data_dir", type=str)
parser.add_argument("--output_data_dir", type=str)
parser.add_argument("--cols", type=str, default=None)
parser.add_argument("--target", type=str, default=None)
args = parser.parse_args()
if os.path.exists(os.path.join(args.left_data_dir, 'train.csv')) and os.path.exists(os.path.join(args.right_data_dir, 'train.csv')):
mode = 'train'
left_dataset = pd.read_csv(os.path.join(args.left_data_dir, 'train.csv'))
right_dataset = pd.read_csv(os.path.join(args.right_data_dir, 'train.csv'))
elif os.path.exists(os.path.join(args.left_data_dir, 'val.csv')) and os.path.exists(os.path.join(args.right_data_dir, 'val.csv')):
mode = 'val'
left_dataset = pd.read_csv(os.path.join(args.left_data_dir, 'val.csv'))
right_dataset = pd.read_csv(os.path.join(args.right_data_dir, 'val.csv'))
else:
    raise IOError('Neither train.csv nor val.csv was found in the received input directories')
if len(left_dataset) != len(right_dataset):
    raise ValueError('The two input files have different numbers of rows, so their columns cannot be concatenated')
left_data = | pd.read_csv(left_dataset) | pandas.read_csv |
#!/usr/bin/python3
import json
from datetime import date, timedelta
from operator import itemgetter
from pprint import pprint
from typing import Dict, Tuple
import jinja2
import pandas as pd
from matplotlib import pyplot as plt
from rpm import labelCompare
# start statistics at this date (used as starting point for graphs)
STATS_START_DATE = date.fromisoformat("2019-02-26")
VALID_EVENTS = [
"released",
"updated",
"adopted",
"removed",
]
with open("../data/packages.json") as FILE:
PACKAGES = json.loads(FILE.read())
class Package:
def __init__(self, name: str):
self.name = name
self.last_version = None
self.last_updated = None
self.sig_package = False
self.update_backlog: Dict[Version, int] = dict()
def fluffify(version) -> Tuple[str, str, str]:
return "0", version, "0"
class Version(str):
def __hash__(self):
return super().__hash__()
def __lt__(self, other):
return labelCompare(fluffify(self), fluffify(other)) == -1
def __gt__(self, other):
return labelCompare(fluffify(self), fluffify(other)) == 1
def __eq__(self, other):
return labelCompare(fluffify(self), fluffify(other)) == 0
def __ne__(self, other):
return labelCompare(fluffify(self), fluffify(other)) != 0
def __le__(self, other):
return labelCompare(fluffify(self), fluffify(other)) != 1
def __ge__(self, other):
return labelCompare(fluffify(self), fluffify(other)) != -1
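# Version wraps RPM-style comparison, so numeric segments compare numerically rather than
# lexicographically: Version("1.10") > Version("1.9") is True, whereas the plain string
# comparison "1.10" > "1.9" would be False.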
def validate(datum):
event = datum["event"]
if event not in VALID_EVENTS:
print(f"Invalid event: {event}:")
pprint(datum)
raise Exception("Invalid event")
name = datum["package"]
if name not in PACKAGES:
print(f"Invalid package: {name}:")
pprint(datum)
raise Exception("Invalid package")
datestr = datum["date"]
try:
date.fromisoformat(datestr)
except:
print(f"Invalid date: {datestr}:")
pprint(datum)
raise Exception("Invalid date")
version = datum["version"]
if version is not None and "-" in version:
print(f"Invalid version: {version}:")
pprint(datum)
raise Exception("Invalid version")
def main():
with open("../data/events.json") as file:
data = json.loads(file.read())
data.sort(key=itemgetter("date"))
for datum in data:
validate(datum)
start_date = date.fromisoformat(data[0]["date"])
stop_date = date.today()
packages: Dict[str, Package] = {name: Package(name) for name in PACKAGES}
statistics = dict()
# FIXME optimize
# iterate over all days
current_date = start_date
while current_date <= stop_date:
stats = dict()
statistics[current_date] = stats
events = list(
filter(lambda x: x["date"] == current_date.isoformat(), data)
)
        # age every outstanding version in each package's update backlog by one day
for package in packages.values():
for version in package.update_backlog.keys():
package.update_backlog[version] += 1
# update package data according to this day's events
for event in events:
name: str = event["package"]
package: Package = packages[name]
etype: str = event["event"]
if etype == "adopted":
package.sig_package = True
continue
if etype == "removed":
package.sig_package = False
continue
# for "released" and "updated", there's a version
version = Version(event["version"])
if etype == "released":
# only update latest version if it's actually greater
if package.last_version is None or package.last_version < version:
package.last_version = version
# initialize backlog duration for new versions with 0
if package.last_updated is None or version > package.last_updated:
package.update_backlog[version] = 0
if etype == "updated":
# always update latest update, assume it never decreases
package.last_updated = version
# if the package is updated to the latest version:
if package.last_version == package.last_updated:
# drop all backlog information
package.update_backlog.clear()
# if it was updated to a version that's not the latest version:
else:
# drop all versions that are older than the updated one
versions = list(package.update_backlog.keys())
for v in versions:
if version >= v:
package.update_backlog.pop(v)
# filter by packages that are maintained by the SIG
sig_packages = {
package.name: package
for package in filter(
lambda p: p.sig_package, packages.values()
)
}
# calculate update backlogs
sum_backlog_len = sum(
len(package.update_backlog.keys())
for package in sig_packages.values()
)
average_backlog_len = (
sum_backlog_len / len(sig_packages.keys())
if len(sig_packages.keys()) != 0 else 0
)
# calculate maximum update delays
sum_backlog_dur = sum(
max(package.update_backlog.values(), default=0)
for package in sig_packages.values()
)
average_backlog_dur = (
sum_backlog_dur / len(sig_packages.keys())
if len(sig_packages.keys()) != 0 else 0
)
# gather number of outdated packages
abs_outdated_pkgs = len(list(
filter(lambda x: (
len(x.update_backlog.keys()) != 0
and
x.sig_package
), packages.values())
))
rel_outdated_pkgs = (abs_outdated_pkgs / len(sig_packages)) if len(sig_packages) != 0 else 0
stats["sig_pkgs"] = len(sig_packages)
stats["abs_outdated_pkgs"] = abs_outdated_pkgs
stats["rel_outdated_pkgs"] = rel_outdated_pkgs
stats["sum_bl_len"] = sum_backlog_len
stats["sum_bl_dur"] = sum_backlog_dur
stats["avg_bl_len"] = average_backlog_len
stats["avg_bl_dur"] = average_backlog_dur
# on towards the next day
current_date += timedelta(days=1)
# print today's package statistics
pprint({name: package.__dict__ for name, package in packages.items()})
for name, package in packages.items():
if package.last_version is None:
print("No release information for:", name)
if package.last_updated is None:
print("No update information for:", name)
# print package statistics
with open("sig_backlog_template.jinja2") as file:
template = jinja2.Template(file.read())
stats_document = template.render(
packages=packages,
package_ods=str(statistics[stop_date]["abs_outdated_pkgs"]),
package_num=str(statistics[stop_date]["sig_pkgs"]),
package_od_percent=(str(round(statistics[stop_date]["rel_outdated_pkgs"]*100)) + "%"),
)
with open("../_pages/sig-backlog.md", "w") as file:
file.write(stats_document)
# create markdown document of package overview table
markdown = list()
markdown.append("| package | last updated | last release | status |")
markdown.append("| ------- | ------------ | ------------ | ------ |")
for name, package in packages.items():
if not package.sig_package:
continue
if package.last_version == package.last_updated:
status = "current"
else:
days = max([*package.update_backlog.values()] + [0])
status = f"{days} days behind"
markdown.append(
f"| {package.name} "
f"| {package.last_updated} "
f"| {package.last_version} "
f"| {status} |"
)
overview_doc = "\n".join([
"---",
"title: Overview",
"layout: page",
"permalink: /overview/",
"---",
"",
"![SIG Packages](/assets/sig_pkgs.png)",
"![Total backlog duration](/assets/sum_bl_dur.png)",
"![Total backlog length](/assets/sum_bl_len.png)",
"![Average backlog duration](/assets/avg_bl_dur.png)",
"![Average backlog length](/assets/avg_bl_len.png)",
"![Number of outdated packages](/assets/od_pkgs_abs.png)",
"![Ratio of outdated packages](/assets/od_pkgs_rel.png)",
"",
] + markdown + [""])
with open("../_pages/sig-overview.md", "w") as file:
file.write(overview_doc)
# start statistics at 2019-02-26 (one day before packages were added)
stat_start_date = STATS_START_DATE
stat_stop_date = date.today()
# restrict data to interesting time period
curr_date = stat_start_date
dates = list()
# linearize statistics
sig_pkgs = list()
od_pkgs_abs = list()
od_pkgs_rel = list()
sum_bl_lens = list()
sum_bl_durs = list()
avg_bl_lens = list()
avg_bl_durs = list()
while curr_date <= stat_stop_date:
dates.append(curr_date.isoformat())
sig_pkgs.append(statistics[curr_date]["sig_pkgs"])
od_pkgs_abs.append(statistics[curr_date]["abs_outdated_pkgs"])
od_pkgs_rel.append(statistics[curr_date]["rel_outdated_pkgs"])
sum_bl_lens.append(statistics[curr_date]["sum_bl_len"])
sum_bl_durs.append(statistics[curr_date]["sum_bl_dur"])
avg_bl_lens.append(statistics[curr_date]["avg_bl_len"])
avg_bl_durs.append(statistics[curr_date]["avg_bl_dur"])
curr_date += timedelta(days=1)
# plot statistics
# get first of the month for plot ticks
firsts = list()
labels = list()
for i, d in enumerate(dates):
iso = date.fromisoformat(d)
if iso.day == 1:
firsts.append(i)
labels.append(d)
df0 = pd.DataFrame(sig_pkgs, index=dates)
df1 = pd.DataFrame(avg_bl_lens, index=dates)
df1s = pd.DataFrame(sum_bl_lens, index=dates)
df2 = | pd.DataFrame(avg_bl_durs, index=dates) | pandas.DataFrame |
import re
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u
from pandas.core.base import FrozenList, FrozenNDArray
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas import Series, Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import nose
import pandas.util.testing as tm
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container)
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container)
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem(): self.container[0] = 5
self.check_mutable_error(setitem)
def setslice(): self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem(): del self.container[0]
self.check_mutable_error(delitem)
def delslice(): del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert_isinstance(result, klass)
self.assertEqual(result, expected)
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = | FrozenList(self.lst + [1, 2, 3]) | pandas.core.base.FrozenList |
"""
lcopt.model
-----------
Module containing the LcoptModel class.
"""
from lcopt.io import *
#from lcopt.ipython_interactive import IFS
from lcopt.interact import FlaskSandbox
from lcopt.bw2_export import Bw2Exporter
from lcopt.analysis import Bw2Analysis
# This is a copy straight from bw2data.query, extracted so as not to cause a dependency.
from lcopt.bw2query import Query, Dictionaries, Filter
from functools import partial
from collections import OrderedDict
import numpy as np
import pickle
import random
import pandas as pd
import warnings
from random import randint
from jinja2 import Environment, PackageLoader
import os
#From bw2 - edited to reinsert capitalisation of units
UNITS_NORMALIZATION = {
"a": "year", # Common in LCA circles; could be confused with acre
"Bq": "Becquerel",
"g": "gram",
"Gj": "gigajoule",
"h": "hour",
"ha": "hectare",
"hr": "hour",
"kBq": "kilo Becquerel",
"kg": "kilogram",
"kgkm": "kilogram kilometer",
"km": "kilometer",
"kj": "kilojoule",
"kWh": "kilowatt hour",
"l": "litre",
"lu": "livestock unit",
"m": "meter",
"m*year": "meter-year",
"m2": "square meter",
"m2*year": "square meter-year",
"m2a": "square meter-year",
"m2y": "square meter-year",
"m3": "cubic meter",
"m3*year": "cubic meter-year",
"m3a": "cubic meter-year",
"m3y": "cubic meter-year",
"ma": "meter-year",
"metric ton*km": "ton kilometer",
"MJ": "megajoule",
"my": "meter-year",
"nm3": "cubic meter",
"p": "unit",
"personkm": "person kilometer",
"person*km": "person kilometer",
"pkm": "person kilometer",
"tkm": "ton kilometer",
"vkm": "vehicle kilometer",
'kg sw': "kilogram separative work unit",
'km*year': "kilometer-year",
'metric ton*km': "ton kilometer",
'person*km': "person kilometer",
'Wh': 'watt hour',
}
def unnormalise_unit(unit):
if unit in UNITS_NORMALIZATION.keys():
return unit
else:
un_units = list(filter(lambda x: UNITS_NORMALIZATION[x] == unit, UNITS_NORMALIZATION))
#print (un_units)
return un_units[0]
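# Examples: unnormalise_unit('kilogram') returns 'kg' (the first abbreviation whose normalised form
# matches), while unnormalise_unit('kg') is already an abbreviation and is returned unchanged.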
class LcoptModel(object):
"""
This is the base model class.
To create a new model, enter a name e.g. ``model = LcoptModel('My_Model')``
To load an existing model use the ``load`` option e.g. ``model = LcoptModel(load = 'My_Model')``
"""
def __init__(self, name=hex(random.getrandbits(128))[2:-1], load=None, useForwast=False):
super(LcoptModel, self).__init__()
# name the instance
self.name = name
# set up the database, parameter dictionaries, the matrix and the names of the exchanges
self.database = {'items': OrderedDict(), 'name': '{}_Database'.format(self.name)}
self.external_databases = []
self.params = OrderedDict()
self.production_params = OrderedDict()
self.ext_params = []
self.matrix = None
self.names = None
self.parameter_sets = OrderedDict()
self.model_matrices = OrderedDict()
self.technosphere_matrices = OrderedDict()
self.leontif_matrices = OrderedDict()
self.parameter_map = {}
self.sandbox_positions = {}
# set the default names of the external databases - these can be changed if needs be
self.ecoinventName = "Ecoinvent3_3_cutoff"
self.biosphereName = "biosphere3"
self.ecoinventFilename = "ecoinvent3_3"
self.biosphereFilename = "biosphere3"
self.forwastName = "forwast"
self.forwastFilename = "forwast"
self.useForwast = useForwast
if self.useForwast:
self.technosphere_databases = [self.forwastName]
else:
self.technosphere_databases = [self.ecoinventName]
self.biosphere_databases = [self.biosphereName]
# default settings for bw2 analysis
self.analysis_settings = {'amount': 1,
'methods': [('IPCC 2013', 'climate change', 'GWP 100a'), ('USEtox', 'human toxicity', 'total')],
'top_processes': 10,
'gt_cutoff': 0.01,
'pie_cutoff': 0.05
}
# initialise with a blank result set
self.result_set = None
if load is not None:
self.load(load)
asset_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'assets')
ecoinventPath = os.path.join(asset_path, self.ecoinventFilename)
biospherePath = os.path.join(asset_path, self.biosphereFilename)
forwastPath = os.path.join(asset_path, self.forwastFilename)
# Try and initialise the external databases if they're not there already
if self.useForwast:
if self.forwastName not in [x['name'] for x in self.external_databases]:
self.import_external_db(forwastPath, 'technosphere')
else:
if self.ecoinventName not in [x['name'] for x in self.external_databases]:
self.import_external_db(ecoinventPath, 'technosphere')
if self.biosphereName not in [x['name'] for x in self.external_databases]:
self.import_external_db(biospherePath, 'biosphere')
# create partial version of io functions
self.add_to_database = partial(add_to_specified_database, database=self.database)
self.get_exchange = partial(get_exchange_from_database, database=self.database)
self.exists_in_database = partial(exists_in_specific_database, database=self.database)
self.get_name = partial(get_exchange_name_from_database, database=self.database)
self.get_unit = partial(get_exchange_unit_from_database, database=self.database)
self.parameter_scan()
def rename(self, newname):
"""change the name of the model (i.e. what the .lcopt file will be saved as)"""
self.name = newname
#def saveAs(self, filename):
# """save the instance as a pickle"""
# pickle.dump(self, open("{}.pickle".format(filename), "wb"))
def save(self):
"""save the instance as a .lcopt file"""
pickle.dump(self, open("{}.lcopt".format(self.name), "wb"))
def load(self, filename):
"""load data from a saved .lcopt file"""
if filename[-6:] != ".lcopt":
filename += ".lcopt"
savedInstance = pickle.load(open("{}".format(filename), "rb"))
attributes = ['name',
'database',
'params',
'production_params',
'ext_params',
'matrix',
'names',
'parameter_sets',
'model_matrices',
'technosphere_matrices',
'leontif_matrices',
'external_databases',
'parameter_map',
'sandbox_positions',
'ecoinventName',
'biosphereName',
'forwastName',
'analysis_settings',
'technosphere_databases',
'biosphere_databases',
'result_set',
'evaluated_parameter_sets',
'useForwast'
]
for attr in attributes:
if hasattr(savedInstance, attr):
setattr(self, attr, getattr(savedInstance, attr))
def create_product (self, name, location='GLO', unit='kg', **kwargs):
"""
Create a new product in the model database
"""
new_product = item_factory(name=name, location=location, unit=unit, type='product', **kwargs)
if not self.exists_in_database(new_product['code']):
self.add_to_database(new_product)
print ('{} added to database'.format(name))
return self.get_exchange(name)
else:
#print('{} already exists in this database'.format(name))
return False
def create_process(self, name, exchanges, location='GLO', unit='kg'):
"""
Create a new process, including all new exchanges (in brightway2's exchange format) in the model database.
Exchanges must have at least a name, type and unit field
"""
found_exchanges = []
for e in exchanges:
exc_name = e.pop('name', None)
exc_type = e.pop('type', None)
this_exchange = self.get_exchange(exc_name)
if this_exchange is False:
my_unit = e.pop('unit', unit)
this_exchange = self.create_product(exc_name, location=location, unit=my_unit, **e)
found_exchanges.append(exchange_factory(this_exchange, exc_type, 1, 1, '{} exchange of {}'.format(exc_type, exc_name)))
new_process = item_factory(name=name, location=location, unit=unit, type='process', exchanges=found_exchanges)
self.add_to_database(new_process)
self.parameter_scan()
return True
def check_param_function_use(self, param_id):
current_functions = {k: x['function'] for k, x in self.params.items() if x['function'] is not None}
problem_list = []
for k, f in current_functions.items():
if param_id in f:
problem_list.append((k, f))
return problem_list
def remove_input_link(self, process_code, input_code):
"""
Remove an input (technosphere or biosphere exchange) from a process, resolving all parameter issues
"""
# 1. find correct process
# 2. find correct exchange
# 3. remove that exchange
# 4. check for parameter conflicts?
# 4. run parameter scan to rebuild matrices?
#print(process_code, input_code)
process = self.database['items'][process_code]
exchanges = process['exchanges']
initial_count = len(exchanges)
new_exchanges = [e for e in exchanges if e['input'] != input_code]
product_code = [e['input'] for e in exchanges if e['type'] == 'production'][0]
#print(product_code)
param_id = [k for k, v in self.params.items() if (v['from'] == input_code[1] and v['to'] == product_code[1])][0]
#print (param_id)
problem_functions = self.check_param_function_use(param_id)
if len(problem_functions) != 0:
print('the following functions have been removed:')
for p in problem_functions:
self.params[p[0]]['function'] = None
print(p)
process['exchanges'] = new_exchanges
del self.params[param_id]
self.parameter_scan()
return initial_count - len(new_exchanges)
def unlink_intermediate(self, sourceId, targetId):
"""
Remove a link between two processes
"""
source = self.database['items'][(self.database.get('name'), sourceId)]
target = self.database['items'][(self.database.get('name'), targetId)]
production_exchange = [x['input'] for x in source['exchanges'] if x['type'] == 'production'][0]
new_exchanges = [x for x in target['exchanges'] if x['input'] != production_exchange]
target['exchanges'] = new_exchanges
self.parameter_scan()
return True
def parameter_scan(self):
"""
Scan the database of the model instance to generate and expose parameters.
This is called by other functions when items are added/removed from the model, but can be run by itself if you like
"""
#self.parameter_map = {}
#self.params = OrderedDict()
cr_list = []
items = self.database['items']
#print(items)
for key in items.keys():
i = items[key]
#print(i['name'], i['type'])
if i['type'] == 'product':
cr_list.append(i['code'])
no_products = len(cr_list)
self.names = [self.get_name(x) for x in cr_list]
self.matrix = np.zeros((no_products, no_products))
for key in items.keys():
i = items[key]
if i['type'] == 'process':
inputs = []
#print(i['name'])
#print([(e['comment'], e['type']) for e in i['exchanges']])
for e in i['exchanges']:
if e['type'] == 'production':
col_code = cr_list.index(e['input'][1])
if not 'p_{}_production'.format(col_code) in self.production_params:
self.production_params['p_{}_production'.format(col_code)] = {
'function': None,
'description': 'Production parameter for {}'.format(self.get_name(e['input'][1])),
'unit': self.get_unit(e['input'][1]),
'from': e['input'],
'from_name': self.get_name(e['input'][1]),
'type': 'production',
}
elif e['type'] == 'technosphere':
#print(cr_list)
row_code = cr_list.index(e['input'][1])
inputs.append((row_code, e['amount']))
for ip in inputs:
self.matrix[(ip[0], col_code)] = ip[1]
param_check_list = []
for c, column in enumerate(self.matrix.T):
for r, i in enumerate(column):
if i > 0:
p_from = cr_list[r]
p_to = cr_list[c]
coords = (r, c)
from_item_type = self.database['items'][(self.database['name'], p_from)]['lcopt_type']
#print('{}\t| {} --> {}'.format(coords, self.get_name(p_from), self.get_name(p_to)))
param_check_list.append('p_{}_{}'.format(coords[0], coords[1]))
if not 'p_{}_{}'.format(coords[0], coords[1]) in self.params:
self.params['p_{}_{}'.format(coords[0], coords[1])] = {
'function': None,
'normalisation_parameter': 'p_{}_production'.format(coords[1]),
'description': 'Input of {} to create {}'.format(self.get_name(p_from), self.get_name(p_to)),
'coords': coords,
'unit': self.get_unit(p_from),
'from': p_from,
'from_name': self.get_name(p_from),
'to': p_to,
'to_name': self.get_name(p_to),
'type': from_item_type,
}
elif 'normalisation_parameter' not in self.params['p_{}_{}'.format(coords[0], coords[1])].keys():
#print("Adding normalisation_parameter to {}".format('p_{}_{}'.format(coords[0], coords[1])))
self.params['p_{}_{}'.format(coords[0], coords[1])]['normalisation_parameter'] = 'p_{}_production'.format(coords[1])
#print('p_{}_{} already exists'.format(coords[0],coords[1]))
else:
pass # print("SOMETHING WRONG HERE\n{}\n".format(self.params['p_{}_{}'.format(coords[0], coords[1])]))
if not 'p_{}_{}'.format(coords[0], coords[1]) in self.parameter_map:
self.parameter_map[(p_from, p_to)] = 'p_{}_{}'.format(coords[0], coords[1])
kill_list = []
for k in self.params.keys():
if k not in param_check_list:
print("{} may be obsolete".format(k))
kill_list.append(k)
for p in kill_list:
print("deleting parameter {}".format(p))
del self.params[p]
return True
def generate_parameter_set_excel_file(self):
"""
Generate an excel file containing the parameter sets in a format you can import into SimaPro Developer.
The file will be called "ParameterSet_<ModelName>_input_file.xlsx"
"""
parameter_sets = self.parameter_sets
p_set = []
p_set_name = "ParameterSet_{}_input_file.xlsx".format(self.name)
p = self.params
for k in p.keys():
if p[k]['function'] is None:
base_dict = {'id': k, 'name': p[k]['description'], 'unit': p[k]['unit']}
for s in parameter_sets.keys():
base_dict[s] = parameter_sets[s][k]
p_set.append(base_dict)
else:
pass
#print("{} is determined by a function".format(p[k]['description']))
for e in self.ext_params:
base_dict = {'id': '{}'.format(e['name']), 'type': 'external', 'name': e['description'], 'unit': ''}
for s in parameter_sets.keys():
base_dict[s] = parameter_sets[s][e['name']]
p_set.append(base_dict)
df = pd.DataFrame(p_set)
writer = pd.ExcelWriter(p_set_name, engine='xlsxwriter')
ps_columns = [k for k in parameter_sets.keys()]
#print (ps_columns)
my_columns = ['name', 'unit', 'id']
my_columns.extend(ps_columns)
#print (my_columns)
        df.to_excel(writer, sheet_name=self.name, columns=my_columns, index=False, merge_cells=False)
        writer.save()  # flush the workbook to disk; without this the .xlsx file is never actually written
        return p_set_name
def add_parameter(self, param_name, description=None, default=0, unit=None):
"""
Add a global parameter to the database that can be accessed by functions
"""
if description is None:
description = "Parameter called {}".format(param_name)
if unit is None:
unit = "-"
name_check = lambda x: x['name'] == param_name
name_check_list = list(filter(name_check, self.ext_params))
if len(name_check_list) == 0:
self.ext_params.append({'name': param_name, 'description': description, 'default': default, 'unit': unit})
else:
print('{} already exists - choose a different name'.format(param_name))
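    # Illustrative usage (hedged; the parameter name below is a made-up example):
    #   model.add_parameter('conversion_efficiency', default=0.9)
    # The new global parameter is then carried in every parameter set and can be referenced by
    # name inside the functions attached to exchange parameters.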
def list_parameters_as_df(self):
"""
Only really useful when running from a jupyter notebook.
Lists the parameters in the model in a pandas dataframe
Columns: id, matrix coordinates, description, function
"""
to_df = []
for i, e in enumerate(self.ext_params):
row = {}
row['id'] = e['name']
row['coords'] = "n/a"
row['description'] = e['description']
row['function'] = "n/a"
to_df.append(row)
for pk in self.params:
p = self.params[pk]
row = {}
row['id'] = pk
row['coords'] = p['coords']
row['description'] = p['description']
row['function'] = p['function']
to_df.append(row)
df = | pd.DataFrame(to_df) | pandas.DataFrame |
#! /usr/bin/env python
### Script containing varous plotting functions for splitting Measurements
import pandas as pd
import sys
import os
import shlex
from subprocess import call
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import matplotlib
import numpy as np
from stack import Stacker
from time import ctime
from glob import glob
from l2stats import ftest
import obspy
import obspy.taup
class Builder:
"""
Class to construct a .pairs Database (and Pierce Points), calc D_SI values and stack L2 surfaces.
- sdb_stem: [str] sdb file stem for the data that you want to process
- path: [str] path to Sheba/Results directory that contains the sdb's that you want
- RunDir: [str] path to the Run Directory than Contains the .lamR surfaces to stack
"""
def __init__(self,p,RunDir,sdb_stem,snr=5.0,syn=False):
self.path = p # Path to Results directory
self.path_stk = '/Users/ja17375/DiscrePy/Sheba/Runs/{}'.format(RunDir)
self.sdb_stem = sdb_stem
self.fpath = '{}/{}_{:02d}.pairs'.format(self.path,self.sdb_stem,int(snr))
        self.lam2_bar = [ ] # Variable to hold lambda2 values returned from stacking routine
self.lam2alpha_sks = [ ] # Variable to hold list of lam2alpha values for SKS
        self.lam2alpha_skks = [ ] # Variable to hold lam2alpha values for SKKS
self.lam2_sum = [ ]
        self.snr = snr # Holds SNR cutoff (default is 5)
self.syn=syn
#if kwargs are none:\
def run(self):
'''Function to build the pairs file'''
# First match the SKS and SKKS
start = ctime()
print('Making Pairs')
self.make_pairs()
        # Apply a quick signal-to-noise test to get rid of the really bad data
        self.P = self.QA_tests() # Overwrite self.P with only accepted events
self.write_out(self.P,name='{}_{:02d}.pairs'.format(self.sdb_stem,int(self.snr)))
# Write initial pairs file so we can make piercepoints
# Next generate the piercepoints and add them to the df
print('Adding PiercePoints')
self.gen_pp()
self.add_pp()
# Now calculate the d_SI vlaues
print('Calculate difference in splitting intensity')
self.add_DSI()
# Finally stack the lamR surfaces to determine the lam2 values
if self.syn is False:
            ''' I.e. are we using real data? Because of the set-up of the synthetics (and I am only using 6 pairs) I will stack these more manually and I don't want to reject any '''
print('Calculate lambda 2 values')
self.add_lam2()
        #Now test for matching and discrepant pairs
print('Apply 2-sigma test for discrepancy')
# self.match_sig2()
print(r'Apply $\bar{\lambda_2}$ & $\Delta SI$ test')
self.match_l2()
# print('{} pairs'.format(len(self.P)))
# And save the result
self.write_out(self.P,'{}_{:02d}.pairs'.format(self.sdb_stem,int(self.snr)))
end = ctime()
print('END. start {}, end {}'.format(start,end))
def gen_pp(self):
        ''' Function to test whether the .pp file exists and, if not, call TauP to generate it and the corresponding
files '''
print('Looking for pp file {}.pp'.format(self.fpath.split('.')[0]))
if os.path.isfile('{}.pp'.format(self.fpath.split('.')[0])):
#print(pf[:-6])
print('Exists')
self.pp = pd.read_csv('{}.pp'.format(self.fpath.split('.')[0]),delim_whitespace=True)
else:
            print('Pierce Points file {}.pp does not exist, calling pierce.sh'.format(self.fpath.split('.')[0]))
p = call(shlex.split('/Users/ja17375/DiscrePy/Sheba/Programs/pierce.sh {}'.format(self.fpath)))
self.pp = pd.read_csv('{}.pp'.format(self.fpath.split('.')[0]),delim_whitespace=True)
# Load SDB and PP (pierce points data for a set of SKS-SKKS pairs)
if os.path.isfile('{}.mspp'.format(self.fpath.split('.')[0])) is False:
print('{}.mspp does not exist, creating'.format(self.fpath.split('.')[0]))
with open('{}.mspp'.format(self.fpath.split('.')[0]),'w+') as writer:
for i,row in enumerate(self.pp.index):
writer.write('> \n {} {} \n {} {} \n'.format(self.pp.lon_SKS[i],self.pp.lat_SKS[i],self.pp.lon_SKKS[i],self.pp.lat_SKKS[i]))
def make_pairs(self):
"""
        Function to take the .sdb files for SKS and SKKS and generate the set of SKS-SKKS pairs that exist in the dataset.
"""
## Set Path to Sheba Directory
SKS_sdb = '{}_SKS_sheba_results.sdb'.format(self.sdb_stem)
SKKS_sdb = '{}_SKKS_sheba_results.sdb'.format(self.sdb_stem)
        # For synthetics I didn't bother putting in the sheba results part, so use these instead:
# SKS_sdb = '{}_SKS.sdb'.format(self.sdb_stem)
# SKKS_sdb = '{}_SKKS.sdb'.format(self.sdb_stem)
# First import the SKS and SKKS .sdb files (sdb = splitting database)
date_time_convert = {'TIME': lambda x: str(x),'DATE': lambda x : str(x)}
SKS = pd.read_csv('{}/{}'.format(self.path,SKS_sdb),delim_whitespace=True,converters=date_time_convert)
SKKS = pd.read_csv('{}/{}'.format(self.path,SKKS_sdb),delim_whitespace=True,converters=date_time_convert)
# Now the sdb files have been read as pd dataframes, we can perform an inner join. This will return a single dataframe containing all rows from SKS and SKKS where
# ['DATE','TIME','STAT','STLA','STLO','EVLA','EVLO','EVDP','DIST','AZI','BAZ'] are the same.
SKS_SKKS_pair = pd.merge(SKS,SKKS,on=['DATE','TIME','STAT','STLA','STLO','EVLA','EVLO','EVDP','DIST','AZI','BAZ'],how='inner')
relabel = {'FAST_x':'FAST_SKS', 'DFAST_x': 'DFAST_SKS','TLAG_x':'TLAG_SKS','DTLAG_x':'DTLAG_SKS','SPOL_x':'SPOL_SKS','DSPOL_x':'DSPOL_SKS',
'WBEG_x':'WBEG_SKS','WEND_x':'WEND_SKS','EIGORIG_x':'EIGORIG_SKS','EIGCORR_x':'EIGCORR_SKS','Q_x':'Q_SKS','SNR_x':'SNR_SKS','NDF_x':'NDF_SKS',
'SI(Pr)_x':'SI(Pr)_SKS', 'SI(Pa)_x':'SI(Pa)_SKS','FAST_y':'FAST_SKKS', 'DFAST_y': 'DFAST_SKKS','TLAG_y':'TLAG_SKKS','DTLAG_y':'DTLAG_SKKS',
'SPOL_y':'SPOL_SKKS','DSPOL_y':'DSPOL_SKKS','WBEG_y':'WBEG_SKKS','WEND_y':'WEND_SKKS','EIGORIG_y':'EIGORIG_SKKS','EIGCORR_y':'EIGCORR_SKKS',
'Q_y':'Q_SKKS','SNR_y':'SNR_SKKS','NDF_y':'NDF_SKKS','SI(Pr)_y':'SI(Pr)_SKKS', 'SI(Pa)_y':'SI(Pa)_SKKS'}
# The dictionary relabels the other columns in the join so that we can more easily pick apart the SKS and SKKS results
SKS_SKKS_pair.rename(relabel,axis='columns',inplace=True)
# Sort the Pairs dataframe so the pairs are in chronological order (by origin time (DATE only))
self.P = SKS_SKKS_pair.sort_values(by=['DATE'],ascending=True)
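    # A minimal sketch of the pairing step above (illustrative column subset only): merging on the
    # shared event/station columns keeps one row per SKS-SKKS pair, and pandas suffixes the
    # overlapping measurement columns with _x/_y, which `relabel` then maps to _SKS/_SKKS:
    #
    #   import pandas as pd
    #   sks = pd.DataFrame({'STAT': ['AAK'], 'DATE': ['20180101'], 'FAST': [40.0]})
    #   skks = pd.DataFrame({'STAT': ['AAK'], 'DATE': ['20180101'], 'FAST': [75.0]})
    #   pair = pd.merge(sks, skks, on=['STAT', 'DATE'], how='inner')
    #   # pair columns: STAT, DATE, FAST_x (-> FAST_SKS), FAST_y (-> FAST_SKKS)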
def write_out(self,df,name):
print('Writing to {}'.format(name))
df.to_csv('{}/{}'.format(self.path,name),sep=' ',index=False)
def add_DSI(self):
'''Calculate the difference in Splitting Intensity for each pair and add it to dataframe'''
si_pr_sks = self.P['SI(Pr)_SKS']
si_pr_skks = self.P['SI(Pr)_SKKS']
si_pa_sks = self.P['SI(Pa)_SKS']
si_pa_skks = self.P['SI(Pa)_SKKS']
d_si_pr = np.abs(si_pr_sks-si_pr_skks)
d_si_pa = np.abs(si_pa_sks-si_pa_skks)
# d = {'D_SI_Pr': d_si_pr,'D_SI_Pa':d_si_pa}
d = {'SI_Pr_sks': si_pr_sks, 'SI_Pr_skks': si_pr_skks,'SI_Pa_sks': si_pa_sks,'SI_Pa_skks': si_pa_skks,
'D_SI_Pr': d_si_pr,'D_SI_Pa':d_si_pa} # Special version which also adds raw splitting intensity
ddf = pd.DataFrame(d)
self.P[['SI_Pr_sks','SI_Pr_skks','SI_Pa_sks','SI_Pa_skks','D_SI_Pr','D_SI_Pa']] = ddf
        # Delete the SI cols as we don't need them any more?
# del self.P['INTENS_x']
# del self.P['INTENS_y']
def pair_stack(self):
''' Runs Stacker for all the desired pairs (a .pairs file)'''
ext ='lamR'
print('Stacking')
rd = self.path_stk.split('/')[-1]
        out = '/Users/ja17375/DiscrePy/Sheba/Results/Combined/{}/Stacks'.format(rd) # For the Filt 03/05 cases we need to hardcode the Combined/ directory
if os.path.isdir(out) is False:
print('{} does not exist, creating'.format(out))
os.mkdir('/Users/ja17375/DiscrePy/Sheba/Results/Combined/{}/Stacks'.format(rd))
for i,f in enumerate(self.P.DATE.values):
# print(len(self.P))
# print('It {}, time is {} '.format(i,str(datetime.now())))
# First get the right DATE,TIME and STATION
date,time,stat = self.P.DATE[i], self.P.TIME[i], self.P.STAT[i]
fstem = '{}_{}_{}'.format(stat,date,time)
lam2_stem = glob('{}/{}/SKS/{}??_SKS.{}'.format(self.path_stk,stat,fstem,ext))
# print(lam2_stem)
print('{}/{}/SKS/{}??_SKS.lamR'.format(self.path_stk,stat,fstem))
print('{}/{}/SKKS/{}??_SKKS.{}'.format(self.path_stk,stat,fstem,ext))
if len(lam2_stem) != 0:
# I.e if glob has managed to find the sks lam2 surface file
sks_lam2 = glob('{}/{}/SKS/{}??_SKS.{}'.format(self.path_stk,stat,fstem,ext))[0]
skks_lam2 = glob('{}/{}/SKKS/{}??_SKKS.{}'.format(self.path_stk,stat,fstem,ext))[0]
Stk = Stacker(sks_lam2,skks_lam2,out)
self.lam2_bar.append(Stk.lam2_bar)
ndf_sks, ndf_skks = self.P.NDF_SKS[i],self.P.NDF_SKKS[i]
self.lam2alpha_sks.append(ftest(Stk.lam2_sks,ndf_sks)) # Calc Lam2 Alpha and append it to list for SKS and SKKS
self.lam2alpha_skks.append(ftest(Stk.lam2_skks,ndf_skks))
elif len(glob('{}/{}/SKS/{}_{}_*_SKS.{}'.format(self.path_stk,stat,stat,date,ext))) !=0 :
fstem2 = '{}_{}'.format(stat,date)
# print('fstem2')
# print('{}/{}/SKS/{}_*_SKS.{}'.format(self.path_stk,stat,fstem2,ext))
sks_lam2 = glob('{}/{}/SKS/{}_*_SKS.{}'.format(self.path_stk,stat,fstem2,ext))[0]
skks_lam2 = glob('{}/{}/SKKS/{}_*_SKKS.{}'.format(self.path_stk,stat,fstem2,ext))[0]
# Now for a sanity check
if (len(sks_lam2) != 0) or (len(skks_lam2) != 0):
Stk = Stacker(sks_lam2,skks_lam2,out)
self.lam2_bar.append(Stk.lam2_bar)
ndf_sks, ndf_skks = self.P.NDF_SKS[i],self.P.NDF_SKKS[i]
self.lam2alpha_sks.append(ftest(Stk.lam2_sks,ndf_sks)) # Calc Lam2 Alpha and append it to list for SKS and SKKS
self.lam2alpha_skks.append(ftest(Stk.lam2_skks,ndf_skks))
else:
print('lam2 surfaces cannot be found, skipping')
pass
else:
print('Glob returns no files, see:')
                print('{}/{}/SKS/{}_{}_*_SKS.{}'.format(self.path_stk, stat, stat, date, ext))
        # Now calculate LAM2ALPHA for SKS and SKKS
def add_lam2(self):
'''
Stack the associated raw lambda 2 surfaces (as output by sheba) for each SKS SKKS pair and find min value
'''
self.pair_stack()
lam2_sum = [self.lam2alpha_sks[i] + self.lam2alpha_skks[i] for i in range(0,len(self.lam2alpha_sks))]
print(len(self.lam2_bar),len(lam2_sum),len(self.lam2alpha_sks),len(self.lam2alpha_skks))
l2df = {'LAM2_BAR' : self.lam2_bar, 'LAM2_SUM' : lam2_sum, 'LAM2A_SKS' : self.lam2alpha_sks, 'LAM2A_SKKS' : self.lam2alpha_skks}
ldf = pd.DataFrame(l2df)
self.P[['LAM2_BAR','LAM2_SUM','LAM2A_SKS','LAM2A_SKKS']] = ldf
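        # Note: LAM2_SUM = LAM2A_SKS + LAM2A_SKKS is later used in match_l2() as the pair-specific
        # lambda2 threshold, so each pair is judged against its own combined 95% confidence level.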
def add_pp(self):
'''Adds piercepoints to .pairs file'''
if len(self.pp) == len(self.P):
print(len(self.pp) ,len(self.P))
self.P['SKS_PP_LAT'] = self.pp.lat_SKS
self.P['SKS_PP_LON'] = self.pp.lon_SKS
self.P['SKKS_PP_LAT'] = self.pp.lat_SKKS
self.P['SKKS_PP_LON'] = self.pp.lon_SKKS
print('Added piercepoints')
else:
print('Dimension mismatch, not adding piercepoints')
def match_sig2(self,sigma=2):
"""
        Function to see if the SKS and SKKS splitting measurements for a pair match within error
        file - the filename you want for the output files
        Default error for this kind of analysis is 2-sigma. Sheba returns 1-sigma, so DFAST and DTLAG need to be scaled appropriately.
"""
fstem = self.path.split('.')[0]# Split('.')[0] takes off the extension
        # First add piercepoints to the pairs files
        # self.add_pp()
        # Set the SKS and SKKS 2-sigma ranges
lbf_SKS = self.P.FAST_SKS - sigma*self.P.DFAST_SKS
ubf_SKS = self.P.FAST_SKS + sigma*self.P.DFAST_SKS
lbf_SKKS = self.P.FAST_SKKS - sigma*self.P.DFAST_SKKS
ubf_SKKS = self.P.FAST_SKKS + sigma*self.P.DFAST_SKKS
lbt_SKS = self.P.TLAG_SKS - sigma*self.P.DTLAG_SKS
ubt_SKS = self.P.TLAG_SKS + sigma*self.P.DTLAG_SKS
lbt_SKKS = self.P.TLAG_SKKS - sigma*self.P.DTLAG_SKKS
ubt_SKKS = self.P.TLAG_SKKS+ sigma*self.P.DTLAG_SKKS
# Now set the logic for the tests
fast_test = (lbf_SKKS <= ubf_SKS) & (lbf_SKS <= ubf_SKKS)
lag_test = (lbt_SKKS <= ubt_SKS) & (lbt_SKS <= ubt_SKKS)
# Q_test = (self.P.Q_SKS <= -0.5 | self.P.Q_SKKS <= -0.5) & (self.P.Q_SKS >= 0.5 | self.P.Q_SKKS >= 0.5)
match = self.P[(fast_test & lag_test)] # Test for pairs that match within the given sigma range
diff = self.P.drop(index=match.index) # Remove matching pairs from original df to get the different pairs.
        # Write out the matching and discrepant dataframes
match.to_csv('{}/{}_{:02d}_matches.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
diff.to_csv('{}/{}_{:02d}_diffs.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
# Open up mspp files
print('Writing to {}/{}_{:02d}'.format(self.path,self.sdb_stem,int(self.snr)))
mspp_match = open('{}/{}_{:02d}_matches.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
mspp_diff = open('{}/{}_{:02d}_diffs.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
for i,index in enumerate(diff.index):
SKS_pp_lat = self.pp.lat_SKS.values[index]
SKS_pp_lon = self.pp.lon_SKS.values[index]
SKKS_pp_lat = self.pp.lat_SKKS.values[index]
SKKS_pp_lon = self.pp.lon_SKKS.values[index]
#print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)
mspp_diff.write('> \n {} {} \n {} {} \n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))
for i,index in enumerate(match.index):
SKS_pp_lat = self.pp.lat_SKS.values[index]
SKS_pp_lon = self.pp.lon_SKS.values[index]
SKKS_pp_lat = self.pp.lat_SKKS.values[index]
SKKS_pp_lon = self.pp.lon_SKKS.values[index]
#print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)
mspp_match.write('> \n {} {} \n {} {} \n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))
mspp_diff.close()
mspp_match.close()
def match_l2(self):
"""
        Function to test if a SK(K)S pair is matching or discrepant. This test is based on lam2 and dSI (see synthetics_stacks notes).
        This function assumes that lam2 and dSI have already been calculated and added to the pairs file. It also assumes that you
        want to keep the same file names but just add suffixes such as _matches and _diffs.
        The lam2 threshold used is the pair's LAM2_SUM (the sum of the two 95% confidence levels) and the dSI threshold is 0.4 (Deng et al., 2017).
"""
fstem = self.path.split('.')[0]# Split('.')[0] takes off the extension
P = self.P.copy()
# print(P.Q_SKS)
# print(P.Q_SKKS)
        # Now apply the test to find the discrepant pairs; by definition the remainder must be the matches
        # First find pairs that are split or not
        uID = P[((P.Q_SKS > -0.7) & (P.Q_SKS < 0.5)) | ((P.Q_SKKS > -0.7) & (P.Q_SKKS < 0.5))]
        P = P.drop(uID.index) # drop() returns a copy, so re-assign to actually remove the unidentifiable pairs
        null_pairs = P[((P.Q_SKS <= -0.7) & (P.Q_SKKS <= -0.7))] # Pairs where both phases are nulls (according to Q), auto-classify as matching
        null_split_pair = P[(((P.Q_SKS <= -0.7) & (P.Q_SKKS >= 0.5)) | ((P.Q_SKS >= 0.5) & (P.Q_SKKS <= -0.7)))] # Test for pairs with 1 null and 1 split, discrepant by definition
        splits = P[((P.Q_SKS > 0.5) & (P.Q_SKKS > 0.5))] # Test for pairs where both phases are split
        t_l2_splits = splits.LAM2_SUM # LAM2_SUM is now the sum of the two 95% confidence levels
t_l2_ns = null_split_pair.LAM2_SUM
t_dSI = 0.4 # Threshold of 0.4 taken from Deng et al (2017)
        diff = splits[(splits.LAM2_BAR > t_l2_splits) & (splits.D_SI_Pr > t_dSI)] # Apply tests for discrepant splitting
        match = splits[(splits.LAM2_BAR <= t_l2_splits) | (splits.D_SI_Pr <= t_dSI)] # If the pair fails either the lam2 or the dSI test then we call it matching
diff_dsi = splits[(splits.D_SI_Pr > 0.4)]
match_dsi = splits[(splits.D_SI_Pr <= 0.4)]
ns_diff = null_split_pair[(null_split_pair.LAM2_BAR > t_l2_ns) & (null_split_pair.D_SI_Pr > t_dSI)]
ns_match = null_split_pair[(null_split_pair.LAM2_BAR <= t_l2_ns) | (null_split_pair.D_SI_Pr <= t_dSI)]
print(len(self.P))
print('There are {} split pairs. {} are matches and {} are discrepant!'.format(len(splits),len(match),len(diff)))
print('There are {} null-split pairs. {} are matches and {} are discrepant!'.format(len(null_split_pair),len(ns_match),len(ns_diff)))
test = len(uID) + len(null_pairs) + len(null_split_pair) + len(diff) + len(match)
print(test)
        # Now write out the matching and discrepant pairs (plus the null and null-split classes)
null_pairs.to_csv('{}/{}_{:02d}_nulls.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
ns_match.to_csv('{}/{}_{:02d}_matches_null_split.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
ns_diff.to_csv('{}/{}_{:02d}_diffs_null_split.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
match.to_csv('{}/{}_{:02d}_matches.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
diff.to_csv('{}/{}_{:02d}_diffs.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
match_dsi.to_csv('{}/{}_{:02d}_matches_dsi.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
diff_dsi.to_csv('{}/{}_{:02d}_diffs_dsi.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
uID.to_csv('{}/{}_{:02d}_uID_l2.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
# Open up mspp files
print('Writing to {}/{}_{:02d}'.format(self.path,self.sdb_stem,int(self.snr)))
mspp_match = open('{}/{}_{:02d}_matches.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
mspp_diff = open('{}/{}_{:02d}_diffs.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
# mspp_match_dsi = open('{}/{}_{:02d}_matches_dsi.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
# mspp_diff_dsi = open('{}/{}_{:02d}_diffs_dsi.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
mspp_uID = open('{}/{}_{:02d}_uID_l2.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
mspp_null_pairs = open('{}/{}_{:02d}_nulls.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
mspp_null_split_match = open('{}/{}_{:02d}_matches_null_split.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
mspp_null_split_diff = open('{}/{}_{:02d}_diffs_null_split.mspp'.format(self.path,self.sdb_stem,int(self.snr)),'w+')
for i,index in enumerate(diff.index):
SKS_pp_lat = self.pp.lat_SKS.values[index]
SKS_pp_lon = self.pp.lon_SKS.values[index]
SKKS_pp_lat = self.pp.lat_SKKS.values[index]
SKKS_pp_lon = self.pp.lon_SKKS.values[index]
#print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)
mspp_diff.write('> \n {} {} \n {} {} \n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))
for i,index in enumerate(null_pairs.index):
SKS_pp_lat = self.pp.lat_SKS.values[index]
SKS_pp_lon = self.pp.lon_SKS.values[index]
SKKS_pp_lat = self.pp.lat_SKKS.values[index]
SKKS_pp_lon = self.pp.lon_SKKS.values[index]
#print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)
mspp_null_pairs.write('> \n {} {} \n {} {} \n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))
for i,index in enumerate( ns_match.index):
SKS_pp_lat = self.pp.lat_SKS.values[index]
SKS_pp_lon = self.pp.lon_SKS.values[index]
SKKS_pp_lat = self.pp.lat_SKKS.values[index]
SKKS_pp_lon = self.pp.lon_SKKS.values[index]
#print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)
mspp_null_split_match.write('> \n {} {} \n {} {} \n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))
for i,index in enumerate( ns_diff.index):
SKS_pp_lat = self.pp.lat_SKS.values[index]
SKS_pp_lon = self.pp.lon_SKS.values[index]
SKKS_pp_lat = self.pp.lat_SKKS.values[index]
SKKS_pp_lon = self.pp.lon_SKKS.values[index]
#print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)
mspp_null_split_diff.write('> \n {} {} \n {} {} \n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))
for i,index in enumerate(match.index):
SKS_pp_lat = self.pp.lat_SKS.values[index]
SKS_pp_lon = self.pp.lon_SKS.values[index]
SKKS_pp_lat = self.pp.lat_SKKS.values[index]
SKKS_pp_lon = self.pp.lon_SKKS.values[index]
#print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)
mspp_match.write('> \n {} {} \n {} {} \n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))
for i,index in enumerate(uID.index):
SKS_pp_lat = self.pp.lat_SKS.values[index]
SKS_pp_lon = self.pp.lon_SKS.values[index]
SKKS_pp_lat = self.pp.lat_SKKS.values[index]
SKKS_pp_lon = self.pp.lon_SKKS.values[index]
#print(i,date,stat,evla,evlo,stla,stlo,SKS_pp_lat,SKS_pp_lon)
mspp_uID.write('> \n {} {} \n {} {} \n'.format(SKS_pp_lon,SKS_pp_lat,SKKS_pp_lon,SKKS_pp_lat))
mspp_uID.close()
mspp_diff.close()
mspp_match.close()
mspp_null_pairs.close()
mspp_null_split_match.close()
mspp_null_split_diff.close()
def QA_tests(self):
        ''' Run some basic QA tests on SNR and SPOL/BAZ. We test whether the signal-to-noise ratio is greater than the cutoff (default 5, which our synthetics work has shown to be the point where one can
        reasonably expect sheba to converge towards the true solution for phi, dt) and SPOL/BAZ to avoid beamforming issues etc. SPOL/BAZ failures are output for potential future investigation'''
t = self.snr
print(t)
self.accepted_i = [ ]
self.snr_fail = [ ]
self.baz_spol_fail = [ ]
        print('There are {} pairs before the SNR (cutoff = {}) test'.format(len(self.P), self.snr))
for i,row in self.P.iterrows():
if row.SNR_SKS > self.snr and row.SNR_SKKS > self.snr:
                # Test to see if the signal-to-noise ratio is high enough
                if abs(row.BAZ%180 - row.SPOL_SKS%180) < 10 and abs(row.BAZ%180 - row.SPOL_SKKS%180) < 10:
                    # Test that the source polarisation is consistent with the back azimuth (difference < 10 degrees)
print('Event accepted')
self.accepted_i.append(i)
else:
print('SPOL-BAZ difference greater than 10, reject')
self.baz_spol_fail.append(i)
else:
print('SNR for SKS or SKKS less than {:02}, auto-reject'.format(self.snr))
self.snr_fail.append(i)
        print('{} accepted, {} rejected for SNR, {} rejected for BAZ-SPOL'.format(len(self.accepted_i),len(self.snr_fail),len(self.baz_spol_fail)))
drop = self.snr_fail #+ self.baz_spol_fail
accepted_pairs_snr = self.P.drop(drop)
accepted_pairs = accepted_pairs_snr.drop(self.baz_spol_fail)
rejected_baz_spol = accepted_pairs_snr.drop(self.accepted_i)
rejected_baz_spol.to_csv('{}/{}_{:02d}_rejected_baz_spol.pairs'.format(self.path,self.sdb_stem,int(self.snr)),index=False,sep=' ')
accepted_pairs.index = pd.RangeIndex(len(accepted_pairs.index))
return accepted_pairs
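    # Worked example of the SPOL/BAZ check above (illustrative numbers): for a phase with BAZ = 185
    # and SPOL_SKS = 3, the test compares 185 % 180 = 5 against 3 % 180 = 3, i.e. |5 - 3| = 2 < 10,
    # so that phase passes; the modulo-180 wrap treats polarisations differing by exactly 180
    # degrees as equivalent. Both the SKS and SKKS polarisations must pass for the pair to be kept.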
class Pairs:
""" A class to actually hold a Pairs (read as a pandas dataframe) and then do useful stuff with it """
def __init__(self,df=None,file=False,fname=None,syn=False,syn1=None,syn2=None,synstk=None):
'''
        df - a pandas dataframe containing the pairs (implicitly assumed that this df has been made using Builder)
        file [bool] - T/F flag for if you want to read in an already existing pairs file. This option is easier if you have not freshly created it, as the correct converters to preserve
                      leading zeros in date and time will be applied
        fname [str] - Full path to (and including) the pairs file you want to read in.
'''
self.rpath = '/Users/ja17375/DiscrePy/'
self.syn = syn # [Bool] - is data synthetics or not?
if self.syn == True:
self.syn1 = syn1
self.syn2 = syn2
self.synstk = synstk
if file is True:
print('Read file option specified')
date_time_convert = {'TIME': lambda x: str(x).zfill(4),'DATE': lambda x : str(x)}
self.df = pd.read_csv(fname,delim_whitespace=True,converters=date_time_convert)
try:
# Now read in new match,diffs made using lam2/dSI test
pmatch_l2 = '{}_matches.pairs'.format(fname.split('.')[0])
pdiff_l2 ='{}_diffs.pairs'.format(fname.split('.')[0])
pmatch_dsi = '{}_matches_dsi.pairs'.format(fname.split('.')[0])
pdiff_dsi = '{}_diffs_dsi.pairs'.format(fname.split('.')[0])
self.matches = pd.read_csv(pmatch_l2,delim_whitespace=True,converters=date_time_convert)
self.diffs = | pd.read_csv(pdiff_l2,delim_whitespace=True,converters=date_time_convert) | pandas.read_csv |
import numpy as np
import pandas as pd
import os
DATA_FOLDER = 'data'
class Vehicle(object):
def __init__(self):
self.x = 0
self.y = 0
self.rides = []
def dist_to_ride(self, ride):
return np.abs(self.x - ride.start_x) + np.abs(self.y - ride.start_y)
def distance(coords1, coords2):
    # Manhattan (L1) distance between two (row, column) coordinates, matching Vehicle.dist_to_ride
    return abs(coords2[1] - coords1[1]) + abs(coords2[0] - coords1[0])
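# Quick illustrative check of the Manhattan-distance helpers above (values are arbitrary):
#
#   v = Vehicle()                  # starts at (0, 0)
#   distance((0, 0), (2, 3))       # -> 5 steps
#
# so a ride starting at (2, 3) would cost that vehicle 5 steps of travel before pickup.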
def parse_data(filename):
data = np.loadtxt(filename)
rows, columns, vehicles, rides, bonus, steps = tuple(data[0, :])
info = {
'rows': rows,
'columns': columns,
'nb_rides': int(rides),
'nb_vehicles': vehicles,
'bonus': bonus,
'nb_steps': steps
}
rides = data[1:, :]
rides_df = | pd.DataFrame(data=rides, columns=["start_x", "start_y", "end_x", "end_y", "min_start", "max_finish"]) | pandas.DataFrame |
from tkinter import *
import tkinter as tk
import os
import datetime as dt
from dateutil.relativedelta import relativedelta
import pytz
from openpyxl import load_workbook
import win32com.client
import win32api
import xlrd
from simple_salesforce import Salesforce
import pandas as pd
from openpyxl.styles import Font, Color, PatternFill
import win32timezone
xl = win32com.client.Dispatch("Excel.Application")
datenow = dt.datetime.today()
lastmonth = datenow - relativedelta(months=1)
root = tk.Tk()
root.title("Outlook-SFDC Sync")
# Add a grid
mainframe = Frame(root)
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
mainframe.pack(pady=25, padx=25)
# Create a Tkinter variable
tkvar = StringVar(root)
tkvar2 = StringVar(root)
# Dictionary with options
choices = {'Outlook Download', 'Outlook SFDC Upload',}
tkvar.set('Outlook Download') # set the default option
popupMenu = OptionMenu(mainframe, tkvar, *choices)
Label(mainframe, text="Please choose which script you want to run:").grid(row=1, column=1)
popupMenu.grid(row=2, column=1)
def ok():
script = tkvar.get()
if script == 'Outlook Download':
create_window()
elif script == 'Outlook SFDC Upload':
root.withdraw()
wb = xlrd.open_workbook('C:\\SFDC Outlook Synchronization\\SFDC_Admin\\SFDC_Admin.xlsx')
first_sheet = wb.sheet_by_name("Sheet1")
a1 = first_sheet.cell(0, 1).value
a2 = first_sheet.cell(1, 1).value
a3 = first_sheet.cell(2, 1).value
try:
sf = Salesforce(username=a1, password=a2, security_token=a3)
except Exception:
win32api.MessageBox(0,
"The script cannot run. You need to either 1) Update your Salesforce password (in cell B2) in the following file: C:\SFDC Outlook Synchronization\SFDC_Admin\SFDC_Admin.xlsx and save the file or 2) Check your Internet connectivity.",
"Error!",
0x00001000)
root.destroy()
quit()
for wbb in xl.Workbooks:
if wbb.Name == 'Outlook Sync.xlsx':
wbb.Close(True)
wb = load_workbook(filename="C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx", read_only=False,keep_vba=False)
ws = wb.get_sheet_by_name('Outlook Sync')
file = pd.ExcelFile('C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx')
df = file.parse('Outlook Sync')
#df.sort_values(['Upload Event to SFDC?'], ascending=[False], inplace=True)
g = 1
for index, row in df.iterrows():
try:
if df.iloc[index]['Upload Event to SFDC?'] == "No" or pd.isnull(df.iloc[index]['Upload Event to SFDC?']):
g = g + 1
continue
if "Yes" in ws.cell(row=g + 1, column=17).value:
g = g + 1
continue
except(Exception):
pass
try:
g = g + 1
if "'" not in df.iloc[index]['SFDC Contact']:
contact = "'" + df.iloc[index]['SFDC Contact'] + "'"
else:
contact = df.iloc[index]['SFDC Contact']
query_result2 = sf.query_all("SELECT Id FROM Contact Where Email = %s" % contact)
records2 = query_result2['records']
df2 = pd.DataFrame(records2)
df2.drop('attributes', inplace=True, axis=1)
if "'" not in df.iloc[index]['Assigned To']:
owner = "'" + df.iloc[index]['Assigned To'] + "'"
else:
owner = df.iloc[index]['Assigned To']
query_result4 = sf.query_all("SELECT Id FROM User Where Email = %s" % owner)
records4 = query_result4['records']
df4 = pd.DataFrame(records4)
df4.drop('attributes', inplace=True, axis=1)
if pd.isnull(df.iloc[index]['Location']):
location = ''
else:
location = df.iloc[index]['Location']
if pd.isnull(df.iloc[index]['Appointment Body']):
description = ''
else:
description = df.iloc[index]['Appointment Body']
if pd.isnull(df.iloc[index]['SFDC Opportunity']):
oppty = ''
else:
oppty = "'" + df.iloc[index]['SFDC Opportunity'] + "'"
query_result3 = sf.query_all("SELECT Id FROM Opportunity Where Name = %s" % oppty)
records3 = query_result3['records']
df3 = pd.DataFrame(records3)
df3.drop('attributes', inplace=True, axis=1)
oppty = df3.iloc[0]['Id']
if pd.isnull(df.iloc[index]['Additional Participant #1']):
additionalp1 = ""
additionalp1id = ""
else:
additionalp1 = df.iloc[index]['Additional Participant #1']
additionalp1sfdc = "'" + additionalp1 + "'"
try:
query_result5 = sf.query_all("SELECT Id FROM User Where Name = %s" % additionalp1sfdc)
records5 = query_result5['records']
df5 = pd.DataFrame(records5)
df5.drop('attributes', inplace=True, axis=1)
additionalp1id = df5.iloc[0]['Id']
except(Exception):
additionalp1id = ""
if pd.isnull(df.iloc[index]['Additional Participant #2']):
additionalp2 = ""
additionalp2id = ""
else:
additionalp2 = df.iloc[index]['Additional Participant #2']
additionalp2sfdc = "'" + additionalp2 + "'"
try:
query_result6 = sf.query_all("SELECT Id FROM User Where Name = %s" % additionalp2sfdc)
records6 = query_result6['records']
df6 = pd.DataFrame(records6)
df6.drop('attributes', inplace=True, axis=1)
additionalp2id = df6.iloc[0]['Id']
except(Exception):
additionalp2id = ""
if pd.isnull(df.iloc[index]['Additional Participant #3']):
additionalp3 = ""
additionalp3id = ""
else:
additionalp3 = df.iloc[index]['Additional Participant #3']
additionalp3sfdc = "'" + additionalp3 + "'"
try:
query_result7 = sf.query_all("SELECT Id FROM User Where Name = %s" % additionalp3sfdc)
records7 = query_result7['records']
df7 = pd.DataFrame(records7)
df7.drop('attributes', inplace=True, axis=1)
additionalp3id = df7.iloc[0]['Id']
except(Exception):
additionalp3id = ""
local = pytz.timezone("Australia/Sydney")
startdate = dt.datetime.strptime(df.iloc[index]['Start'], "%Y-%B-%d %I:%M%p")
local_dt = local.localize(startdate, is_dst=None)
startdate = local_dt.astimezone(pytz.utc)
startdate = startdate.strftime("%Y-%m-%dT%H:%M:%S")
enddate = dt.datetime.strptime(df.iloc[index]['End'], "%Y-%B-%d %I:%M%p")
local_dt = local.localize(enddate, is_dst=None)
enddate = local_dt.astimezone(pytz.utc)
enddate = enddate.strftime("%Y-%m-%dT%H:%M:%S")
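                # Worked example of the conversion above (illustrative): an Outlook start of
                # '2019-March-05 9:00AM' is parsed, localised to Australia/Sydney (AEDT, UTC+11),
                # converted to UTC and formatted as '2019-03-04T22:00:00' - the ISO form expected
                # by the Salesforce Event StartDateTime/EndDateTime fields.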
if df.iloc[index]['Event Type'] == "Customer.Value.Assurance":
cv = 'Customer Value Assurance'
else:
cv = df.iloc[index]['Event Type']
if cv == "Customer Value Assurance" and (df.iloc[index]['Event Sub-Type'] != "Onboarding Conversation" and df.iloc[index]['Event Sub-Type'] != "Service Orientation" and df.iloc[index]['Event Sub-Type'] != "Mid-Year Check In" and df.iloc[index]['Event Sub-Type'] != "Relationship Building" and df.iloc[index]['Event Sub-Type'] != "Recruiting"):
ws.cell(row=g,column=17).value = "No - your event type and event sub-type do not align. Please fix this and try to upload again."
ft = Font()
                    ft.underline = 'none' # no underline
                    ft.color = Color(rgb='FF000000') # black font colour
ws.cell(row=g, column=17).font = ft
wb.save("C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx")
continue
if cv == "Commercial" and (df.iloc[index]['Event Sub-Type'] != "Prospecting" and df.iloc[index]['Event Sub-Type'] != "Sales Introduction" and df.iloc[index]['Event Sub-Type'] != "Sales Follow Up" and df.iloc[index]['Event Sub-Type'] != "Closing Call"):
ws.cell(row=g,column=17).value = "No - your event type and event sub-type do not align. Please fix this and try to upload again."
ft = Font()
                    ft.underline = 'none' # no underline
                    ft.color = Color(rgb='FF000000') # black font colour
ws.cell(row=g, column=17).font = ft
wb.save("C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx")
continue
if cv == "Consultation" and (df.iloc[index]['Event Sub-Type'] != "Interview" and df.iloc[index]['Event Sub-Type'] != "Prewire/Scoping" and df.iloc[index]['Event Sub-Type'] != "Advisory/Content Review" and df.iloc[index]['Event Sub-Type'] != "Results Delivery"):
ws.cell(row=g,column=17).value = "No - your event type and event sub-type do not align. Please fix this and try to upload again."
ft = Font()
                    ft.underline = 'none' # no underline
                    ft.color = Color(rgb='FF000000') # black font colour
ws.cell(row=g, column=17).font = ft
wb.save("C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx")
continue
results = sf.Event.create({'OwnerId': df4.iloc[0]['Id'], 'WhoId': df2.iloc[0]['Id'], 'WhatId': oppty, 'Event_Type__c': cv, 'Event_Sub_Type__c': df.iloc[index]['Event Sub-Type'],
'Channel__c': df.iloc[index]['Channel'], 'Status__c': df.iloc[index]['Event Status'], 'Subject': df.iloc[index]['Subject'],
'ShowAs': 'Busy','StartDateTime': startdate, 'EndDateTime': enddate, 'Location': location, 'Description': description,
'Additional_Participant_1_Login__c': additionalp1, 'Additional_Participant_1_Id__c': additionalp1id,
'Additional_Participant_2_Login__c': additionalp2, 'Additional_Participant_2_Id__c': additionalp2id,
'Additional_Participant_3_Login__c': additionalp3, 'Additional_Participant_3_Id__c': additionalp3id, 'Maps_Category__c': df.iloc[index]['Outlook Id']})
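                # simple_salesforce returns the create result as a dict-like object; results['id']
                # holds the new Event's Salesforce Id, which is used below to build the record link.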
link = "https://na29.salesforce.com/" + results['id']
ws.cell(row=g, column=17).value = '=HYPERLINK("{}", "{}")'.format(link, "Yes")
ws.cell(row=g, column=19).value = '=HYPERLINK("{}", "{}")'.format(link, "Yes - has been just uploaded or previously uploaded")
ft = Font()
ft.underline = 'single' # add single underline
ft.color = Color(rgb='000000FF') # add blue color
ws.cell(row=g, column=17).font = ft
ws.cell(row=g, column=19).font = ft
FILL_Green = PatternFill(start_color='00FF00',
end_color='00FF00',
fill_type='solid')
for row in ws['A'+ str(g) +":"+"S"+ str(g)]:
for cell in row:
cell.fill = FILL_Green
wb.save("C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx")
continue
except(Exception):
ws.cell(row=g, column=17).value = "No - either the contact/opportunity doesn't exist in SFDC or you left a key field such as 'Event Type' blank or incorrectly spelled. Please make edits and try uploading again."
ft = Font()
                ft.underline = 'none' # no underline
                ft.color = Color(rgb='FF000000') # black font colour
ws.cell(row=g, column=17).font = ft
wb.save("C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx")
continue
wb15 = load_workbook(filename='C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx')
wb15.save("C:\\SFDC Outlook Synchronization\\Previous Uploads\\Outlook Sync - " + datenow.strftime("%d %B %Y %I-%M %p") + ".xlsx")
wb2 = xl.Workbooks.Open('C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx')
xl.Visible = True
win32api.MessageBox(0,
"The Outlook-SFDC upload process is complete! Please check the events you created by clicking the hyperlinks under the column 'Upload Sucessful?'",
"Outlook-SFDC Upload Process Complete!", 0x00001000)
root.destroy()
def rangeok(e1, e2, e3, e4, e5, e6, name, window):
f1 = int(e1.get())
f2 = int(e2.get())
f3 = int(e3.get())
f4 = int(e4.get())
f5 = int(e5.get())
f6 = int(e6.get())
internal = tkvar2.get()
shared = str(name.get())
if 'withdrawn' == root.state():
try:
begin = dt.datetime(f3, f2, f1, 0, 0)
begin = begin.strftime("%Y-%m-%d %H:%M")
end = dt.datetime(f6, f5, f4, 23, 59)
end = end.strftime("%Y-%m-%d %H:%M")
if begin <= end:
download(begin, end, internal, shared, window)
else:
win32api.MessageBox(0,
"The start date is greater than the end date. Please fix this and click 'Ok' again.",
"Error!",
0x00001000)
except(ValueError):
win32api.MessageBox(0,
"The date(s) you inputted are not real date(s). Please input real date(s) and click 'Ok' again. =)",
"Error!",
0x00001000)
def create_window():
window = tk.Toplevel(root)
    # Note: x/y were not defined anywhere in this script; as a reasonable assumption the pop-up is
    # placed at the root window's current screen position.
    x = root.winfo_x()
    y = root.winfo_y()
    window.geometry('%dx%d+%d+%d' % (355, 315, x, y))
Label(window, text="Select from the drop-down what you want to download:").place(x=0, y=0, width=350)
choices = {'Today', 'Past Week', 'Past Month', 'Next Week', 'Next Month'}
tkvar.set('Today') # set the default option
popupMenu = OptionMenu(window, tkvar, *choices)
popupMenu.place(x=70, y=25, width=100)
Label(window, text="OR", font = 'bold', foreground ='red').place(x=155, y=80, width=50)
Label(window, text="Input the start/end dates of what you want to download:").place(x=0, y=125, width=350)
Label(window, text="Start Date").place(x=5, y=150, width=50)
Label(window, text="End Date").place(x=180, y=150, width=50)
e1 = Entry(window, justify='center')
e2 = Entry(window, justify='center')
e3 = Entry(window, justify='center')
e4 = Entry(window, justify='center')
e5 = Entry(window, justify='center')
e6 = Entry(window, justify='center')
e1.place(x=60, y=150, width=25)
Label(window, text="DD").place(x=60, y=170, width=25)
e1.insert(0, lastmonth.day)
e2.place(x=90, y=150, width=25)
Label(window, text="MM").place(x=90, y=170, width=25)
e2.insert(0, lastmonth.month)
e3.place(x=120, y=150, width=40)
Label(window, text="YYYY").place(x=120, y=170, width=40)
e3.insert(0, lastmonth.year)
e4.place(x=240, y=150, width=25)
Label(window, text="DD").place(x=240, y=170, width=25)
e4.insert(0, datenow.day)
e5.place(x=270, y=150, width=25)
Label(window, text="MM").place(x=270, y=170, width=25)
e5.insert(0, datenow.month)
e6.place(x=300, y=150, width=45)
Label(window, text="YYYY").place(x=300, y=170, width=40)
e6.insert(0, datenow.year)
rangeb = Button(window, text="Ok", bg='pale green', command=lambda: rangeok(e1, e2, e3, e4, e5, e6, name, window))
rangeb.place(x=165, y=185, width=35)
Label(window, text="-------------------------------Optional-------------------------------").place(x=0, y=235)
Label(window, text="Include Internal Planners?").place(x=1, y=265, width=150)
choices2 = {'Yes', 'No'}
tkvar2.set('No') # set the default option
popupMenu2 = OptionMenu(window, tkvar2, *choices2)
popupMenu2.place(x=50, y=282, height = 27, width=50)
Label(window, text="Download a Shared Calendar?").place(x=185, y=265, width=160)
Label(window, text="Write in Name").place(x=170, y=287, width=80)
name = Entry(window)
name.place(x=255, y=287, width=95)
name.insert(0, "")
choiceb = Button(window, text="Ok", bg='pale green', command=lambda: choiceok(name, window))
choiceb.place(x=225, y=28, width=35)
root.withdraw()
window.protocol('WM_DELETE_WINDOW', exit)
def exit():
root.destroy()
def download(begin, end, internal, shared, window):
window.withdraw()
wb = xlrd.open_workbook('C:\\SFDC Outlook Synchronization\\SFDC_Admin\\SFDC_Admin.xlsx')
first_sheet = wb.sheet_by_name("Sheet1")
a1 = first_sheet.cell(0, 1).value
a2 = first_sheet.cell(1, 1).value
a3 = first_sheet.cell(2, 1).value
try:
sf = Salesforce(username=a1, password=a2, security_token=a3)
except Exception:
win32api.MessageBox(0,
"The script cannot run. You need to either 1) Update your Salesforce password (in cell B2) in the following file: C:\SFDC Outlook Synchronization\SFDC_Admin\SFDC_Admin.xlsx and save the file or 2) Check your Internet connectivity.",
"Error!",
0x00001000)
root.destroy()
quit()
for wbb in xl.Workbooks:
if wbb.Name == 'Outlook Sync.xlsx':
wbb.Close(True)
wb = load_workbook(filename="C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx", read_only=False, keep_vba=False)
ws = wb.get_sheet_by_name('Outlook Sync')
FILL_NONE = PatternFill(start_color='FFFFFF',
end_color='FFFFFF',
fill_type='none')
for row in ws['A2:S1000']:
for cell in row:
cell.value = None
cell.fill = FILL_NONE
wb.save("C:\\SFDC Outlook Synchronization\\Outlook Sync.xlsx")
if shared == "":
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
calendar = outlook.GetDefaultFolder(9)
appts = calendar.Items
else:
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
myRecipient = outlook.CreateRecipient(shared)
myRecipient.Resolve()
if myRecipient.Resolved == True:
try:
othercalendar = outlook.GetSharedDefaultFolder(myRecipient, 9)
appts = othercalendar.Items
except(Exception):
win32api.MessageBox(0,
"You don't have access to the shared calendar you inputted. Please ask the person for access to their calendar and try again.",
"Error",
0x00001000)
root.destroy()
quit()
else:
win32api.MessageBox(0,
"The name you inputted for their shared calendar does not exist in the corporate directory. Please choose a different name.",
"Error",
0x00001000)
root.destroy()
quit()
datenow = dt.datetime.today()
try:
appts.Sort("[Start]")
appts.IncludeRecurrences = True
appts = appts.Restrict("[Start] >= '" + begin + "' AND [Start] <= '" + end + "'")
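        # Sorting by [Start] and setting IncludeRecurrences before calling Restrict is the
        # documented Outlook pattern for expanding recurring meetings into individual occurrences
        # that fall inside the requested date window.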
except(Exception):
win32api.MessageBox(0,
"An error occured trying to download the shared calendar. Please ensure you have the correct permissions and try again.",
"Error",
0x00001000)
root.destroy()
quit()
local = pytz.timezone("Australia/Sydney")
local_dt = local.localize(datenow, is_dst=None)
datenow = local_dt.astimezone(pytz.utc)
g = 1
for appt in appts:
attlist = []
otherlist = []
opptylist = []
confirmedlist = []
finallist = []
ceblist = []
wodkalist = []
attendee = appt.RequiredAttendees
attlist = attendee.split("; ")
for att in attlist:
search_string = att
recipient = outlook.Session.CreateRecipient(search_string)
recipient.Resolve()
if recipient.Resolved == True:
ae = recipient.AddressEntry
email_address = None
try:
if 'EX' == ae.Type:
eu = ae.GetExchangeUser()
email_address = eu.PrimarySmtpAddress
if 'SMTP' == ae.Type:
email_address = ae.Address
if "cebglobal.com" not in email_address and "gartner.com" not in email_address and "evanta.com" not in email_address and "executiveboard.com" not in email_address:
otherlist.append(email_address)
else:
if email_address == a1 and shared == '':
continue
attsplit = att.split(", ")
attsplit = attsplit[1] + " " + attsplit[0]
if attsplit == shared and shared != '':
continue
ceblist.append(attsplit)
except(Exception):
pass
if otherlist != []:
for attd in otherlist:
if confirmedlist != []:
break
if "'" not in attd:
contact = "'" + attd + "'"
else:
contact = attd
query_result1 = sf.query_all("SELECT Id, (SELECT OpportunityId From OpportunityContactRoles) FROM Contact Where Email = %s" % contact)
records1 = query_result1['records']
df1 = pd.DataFrame(records1)
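                # The nested SOQL subquery comes back as nested dicts: each Contact row's
                # 'OpportunityContactRoles' entry is either None or {'records': [{'OpportunityId': ...}, ...]},
                # which the loop below unpacks to find a linked opportunity.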
if df1.empty:
continue
else:
opptylist.append(attd)
df1.drop('attributes', inplace=True, axis=1)
for index, row in df1.iterrows():
name = (row['OpportunityContactRoles'])
if name == None:
finallist.append(attd)
break
else:
confirmedlist.append(attd)
g = g + 1
df1.loc[index, 'OpportunityContactRoles'] = name['records'][0]['OpportunityId']
ws.cell(row=g, column=6).value = attd
oppty = "'" + df1.iloc[0]['OpportunityContactRoles'] + "'"
query_result2 = sf.query_all("SELECT Name FROM Opportunity Where Id = %s" % oppty)
records2 = query_result2['records']
df2 = pd.DataFrame(records2)
ws.cell(row=g, column=7).value = df2.iloc[0]['Name']
break
if confirmedlist == [] and finallist !=[]:
for attd in finallist:
if "'" not in attd:
othercontact = "'" + attd + "'"
else:
othercontact = attd
query_result3 = sf.query_all("SELECT Opportunity_Name__c FROM OCCR__c Where Contact_Email__c = %s" % othercontact)
records3 = query_result3['records']
df3 = | pd.DataFrame(records3) | pandas.DataFrame |
import pdb
import unittest
import torch
import pandas as pd
import numpy as np
from agents.SACAgent import SACAgent
from cobs.model import Model
from test.test_config import state_name, sac_network_map, eplus_naming_dict, eplus_var_types, \
SatAction, BlindActionSingleZone, ThermActionSingleZone, BlindActionMultiZone,\
ThermActionMultiZone
from utils.rewards import ViolationPActionReward
# SatAction = ActionCreator("Schedule:Constant", "Schedule Value", "SAT_SP")
# BlindActionSingle = ActionCreator("Schedule:Constant", "Schedule Value", "WF-1_shading_schedule")
# ThermActionSingle = ActionCreator("Zone Temperature Control", "Heating Setpoint", "SPACE1-1")
class SACTest(unittest.TestCase):
agent_params = {
"policy_type": "Gaussian",
"gamma": 0.99,
"tau": 0.005,
"lr": 0.0003,
"batch_size": 2,
"hidden_size": 2,
"updates_per_step": 1,
"target_update_interval": 1,
"replay_size": 200,
"cuda": False,
"step": 300 * 3,
"start_steps": 5,
"alpha": 0.2,
"automatic_entropy_tuning": False,
"num_inputs": len(state_name),
"min_sat_action": -20,
"max_sat_action": 20,
"seed": 42
}
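    # These hyperparameters (batch_size=2, hidden_size=2, replay_size=200) are presumably kept
    # deliberately tiny so that each test episode trains and runs quickly rather than well.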
eplus_path = '/Applications/EnergyPlus-9-' \
'3-0-bugfix/'
# idf_path = 'test/eplus_files/test_control.idf'
# idf_path = 'test/eplus_files/5Zone_Control_SAT.idf'
epw_path = 'test/eplus_files/test.epw'
Model.set_energyplus_folder(eplus_path)
def test_sac_sat(self):
self.agent_params["num_sat_actions"] = 1
self.agent_params["num_blind_actions"] = 0
self.agent_params["num_therm_actions"] = 0
network = sac_network_map['leaky']
agent = SACAgent(self.agent_params, network, chkpt_dir='test/agent_tests/test_results')
ep_model = self.setup_env('test/eplus_files/test_control.idf')
observations, actions, agent = self.run_episode(ep_model, agent, "SAT_SP")
obs_test = pd.DataFrame.from_dict(observations)
sat_actions, therm_actions, blind_actions = actions
# pdb.set_trace()
obs_test['actions'] = [a1 for a1, _ in sat_actions]
obs_test['sat_stpts'] = [a2.item() for _, a2 in sat_actions]
# obs_test['blind_actions'] = blind_actions
float_cols = [
'Outdoor Temp.',
'Diff. Solar Rad.',
'Direct Solar Rad.',
'Indoor Temp.',
'Indoor Temp. Setpoint',
'PPD',
'Occupancy Flag',
'Heat Coil Power',
'HVAC Power',
'Sys Out Temp.',
'MA Temp.',
'actions',
'sat_stpts'
]
obs_true = pd.read_csv('test/agent_tests/saved_results/sac_no_blinds_obs.csv')
for c in float_cols:
close = np.isclose(obs_test[c], obs_true[c])
self.assertTrue(close.all())
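        # Note: the saved_results CSVs act as regression fixtures - the fixed seed (42) in
        # agent_params is presumably what keeps the episode reproducible, so the np.isclose
        # comparison against the stored observations catches any change in agent or environment behaviour.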
def test_sac_blinds_one_zone(self):
self.agent_params["num_sat_actions"] = 1
self.agent_params["num_blind_actions"] = 1
self.agent_params["num_therm_actions"] = 0
network = sac_network_map['leaky']
agent = SACAgent(self.agent_params, network, chkpt_dir='test/agent_tests/test_results')
ep_model = self.setup_env('test/eplus_files/test_control.idf')
ep_model.set_blinds(
["WF-1"],
blind_material_name="White Painted Metal Blind",
agent_control=True
)
observations, actions, agent = self.run_episode(ep_model, agent, 'SAT_SP')
obs_test = pd.DataFrame.from_dict(observations)
sat_actions, therm_actions, blind_actions = actions
obs_test['actions'] = [a1 for a1, _ in sat_actions]
obs_test['sat_stpts'] = [a2.item() for _, a2 in sat_actions]
obs_test['blind_actions'] = [a1[0] for a1 in blind_actions]
# obs_test.to_csv('test/agent_tests/test_results/sac_blinds_obs.csv', index=False)
float_cols = [
'Outdoor Temp.',
'Diff. Solar Rad.',
'Direct Solar Rad.',
'Indoor Temp.',
'Indoor Temp. Setpoint',
'PPD',
'Occupancy Flag',
'Blind Angle Zone 1',
'Heat Coil Power',
'HVAC Power',
'Sys Out Temp.',
'MA Temp.',
'actions',
'sat_stpts',
'blind_actions'
]
obs_true = pd.read_csv('test/agent_tests/saved_results/sac_blinds_obs.csv')
for c in float_cols:
close = np.isclose(obs_test[c], obs_true[c])
self.assertTrue(close.all())
def test_sac_blinds_many_zone_single_setpoint(self):
self.agent_params["num_sat_actions"] = 1
self.agent_params["num_blind_actions"] = 1
self.agent_params["num_therm_actions"] = 0
network = sac_network_map['leaky']
agent = SACAgent(self.agent_params, network, chkpt_dir='test/agent_tests/test_results')
ep_model = self.setup_env('test/eplus_files/5Zone_Control_SAT_no_windowcontrol.idf')
ep_model.set_blinds(
["WF-1", "WB-1", "WL-1", "WR-1"],
blind_material_name="White Painted Metal Blind",
agent_control=True
)
# pdb.set_trace()
observations, actions, agent = self.run_episode(ep_model, agent, 'SAT_SP', blinds_zone_multi=True)
obs_test = pd.DataFrame.from_dict(observations)
sat_actions, therm_actions, blind_actions = actions
obs_test['actions'] = [a1 for a1, _ in sat_actions]
obs_test['sat_stpts'] = [a2.item() for _, a2 in sat_actions]
obs_test['blind_actions'] = [a1[0] for a1 in blind_actions]
self.assertEqual(len(blind_actions[0]), 1)
self.assertTrue(obs_test['Blind Angle Zone 1'].equals(obs_test['Blind Angle Zone 2']))
self.assertTrue(obs_test['Blind Angle Zone 1'].equals(obs_test['Blind Angle Zone 3']))
self.assertTrue(obs_test['Blind Angle Zone 1'].equals(obs_test['Blind Angle Zone 4']))
float_cols = [
'Outdoor Temp.',
'Diff. Solar Rad.',
'Direct Solar Rad.',
'Indoor Temp.',
'Indoor Temp. Setpoint',
'PPD',
'Occupancy Flag',
'Blind Angle Zone 1',
'Heat Coil Power',
'HVAC Power',
'Sys Out Temp.',
'MA Temp.',
'actions',
'sat_stpts',
'blind_actions'
]
obs_true = | pd.read_csv('test/agent_tests/saved_results/sac_blinds_multi_zone_single_stpt_obs.csv') | pandas.read_csv |