repo_name (string, 7-92 chars) | path (string, 5-149 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 911-693k chars) | license (15 classes)
---|---|---|---|---|---|
KECB/learn | computer_vision/12_rmv_salt_pepper_median_blur.py | 1 | 1464 | import numpy as np
import cv2
import matplotlib.pyplot as plt
# load in image and add Salt and pepper noise
moon = cv2.imread('images/moon.png', 0)
######################################################## ADD SALT & PEPPER NOISE
# salt and peppering manually (randomly assign coords as either white or black)
rows, cols = moon.shape
salt_vs_pepper_ratio = 0.5
amount = 0.01
moon_salted_and_peppered = moon.copy()
num_salt = np.ceil(amount * moon.size * salt_vs_pepper_ratio)
coords = [np.random.randint(0, i - 1, int(num_salt)) for i in moon.shape]
moon_salted_and_peppered[tuple(coords)] = 255
num_pepper = np.ceil(amount * moon.size * (1 - salt_vs_pepper_ratio))
coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in moon.shape]
moon_salted_and_peppered[tuple(coords)] = 0
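# (Alternative sketch, assuming scikit-image is installed: a similar corruption can
# be produced with skimage.util.random_noise(moon, mode='s&p', amount=amount,
# salt_vs_pepper=salt_vs_pepper_ratio), which returns a float image scaled to [0, 1].)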
############################################ APPLY MEDIAN FILTER TO REMOVE NOISE
# The second argument is the aperture linear size; it must be odd and greater
# than 1, for example: 3, 5, 7
moon_median = cv2.medianBlur(moon_salted_and_peppered, 3)
# show all three images using Matplotlib
plt.figure(figsize=(15, 6))
plt.subplot(1, 3, 1)
plt.imshow(moon, cmap='gray'), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 2)
plt.imshow(moon_salted_and_peppered, cmap='gray')
plt.title('Salted & Peppered'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 3)
plt.imshow(moon_median, cmap='gray'), plt.title('Median Blur on S&P')
plt.xticks([]), plt.yticks([])
plt.tight_layout()
plt.show()
| mit |
mop/LTPTextDetector | scripts/pw_analyze/svmdelme.py | 1 | 5600 | import numpy as np
from sklearn.cross_validation import cross_val_score, ShuffleSplit
from sklearn.svm import LinearSVC, SVC
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import precision_recall_fscore_support
import matplotlib.pyplot as plt
data = np.genfromtxt('dists_cleaned.csv', delimiter=',')
#samples = np.bitwise_or(np.bitwise_and(data[:,0] == -1, data[:,1] <= 2), data[:,0]==1)
#data = data[samples,:]
#data = data[data[:,7]>=5,:]
pos = data[data[:,0]==1,:]
neg = data[data[:,0]==-1,:]
a = 0.33974138
b = 0.47850904
c = -0.56307525
xrg = np.linspace(0,5,100)
yrg = np.linspace(0,5,100)
X,Y = np.meshgrid(xrg,yrg)
Z = a * X + b * Y + c
print Z
plt.scatter(neg[:,3], neg[:,4], color='r')
plt.scatter(pos[:,3], pos[:,4], color='b')
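# (Note: plt.contour below draws several level curves of the decision function;
# passing levels=[0], e.g. plt.contour(X, Y, Z, levels=[0]), would draw only the
# decision boundary a*x + b*y + c = 0.)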
plt.contour(X,Y,Z)
plt.xlabel('medians')
plt.ylabel('heights')
plt.show()
#data[data[:,0]==1,0] = 2
#data[data[:,0]==-1,0] = 1
#data[data[:,0]==2,0] = -1
#print data.shape
#data = data[data[:,1]>0,:]
#data = data[data[:,2]>0,:]
#print data.shape
cv = ShuffleSplit(data.shape[0], n_iter=10, random_state=4)
min_dists = [1,2,3,4,5,100]
c1_grid = np.logspace(0,2,10)
c2_grid = np.logspace(0,4,15)
class_weights = {1:5, -1:1}
beta = 1.0
#best_params = {}
#best_fscore = 0
#for d in min_dists:
# for C1 in c1_grid:
# for C2 in c2_grid:
# precisions = []
# recalls = []
# fscores = []
# accs = []
# for (train_idx, test_idx) in cv:
# X = data[train_idx,3:5]
# y = data[train_idx,0]
#
# svm1 = LinearSVC(random_state=42, C=C1, class_weight=class_weights)
# svm1.fit(X,y)
#
# X_simple = data[train_idx, 4]
# X_simple = X_simple.reshape((train_idx.shape[0], 1))
# svm2 = LinearSVC(random_state=42, C=C2, class_weight=class_weights)
# svm2.fit(X_simple, y)
#
# #ys = svm2.predict(data[test_idx, 4].reshape((test_idx.shape[0], 1)))
# #accs.append(svm2.score(data[test_idx, 4].reshape((test_idx.shape[0], 1)), data[test_idx,0]))
#
# ys = np.zeros((test_idx.shape[0],))
# for i,idx in enumerate(test_idx):
# if data[idx,-1] >= d:
# ys[i] = svm1.predict(data[idx, 3:5].reshape((1,2)))
# else:
# ys[i] = svm2.predict(data[idx, 4].reshape((1,1)))
#
# acc = np.sum(data[test_idx,0] == ys) / float(test_idx.shape[0])
# accs.append(acc)
# ps, rs, fs, ss = precision_recall_fscore_support(data[test_idx,0], ys, beta=beta)
# precisions.append(ps[1])
# recalls.append(rs[1])
# fscores.append(fs[1])
# print 'C1: %f, C2: %f, d: %f, prec: %f, recall: %f, f-score: %f, acc: %f' % (C1, C2, d, np.mean(precisions), np.mean(recalls), np.mean(fscores), np.mean(accs))
# if np.mean(fscores) > best_fscore:
# print '*'
# best_fscore = np.mean(fscores)
# best_params = {
# 'C1': C1,
# 'C2': C2,
# 'd': d
# }
#
#best_params = {}
#best_fscore = 0
#print data.shape
#for C1 in c1_grid:
# precisions = []
# recalls = []
# fscores = []
# accs = []
# for (train_idx, test_idx) in cv:
# X = data[train_idx,3:5]
# y = data[train_idx,0]
#
# svm1 = LinearSVC(random_state=4,C=C1, class_weight=class_weights)
# svm1.fit(X,y)
#
# ys = np.zeros((test_idx.shape[0],))
# for i,idx in enumerate(test_idx):
# ys[i] = svm1.predict(data[idx, 3:5].reshape((1,2)))
#
# acc = np.sum(data[test_idx,0] == ys) / float(test_idx.shape[0])
# accs.append(acc)
# ps, rs, fs, ss = precision_recall_fscore_support(data[test_idx,0], ys, beta=beta)
# precisions.append(ps[1])
# recalls.append(rs[1])
# fscores.append(fs[1])
# svm = LinearSVC(random_state=4,C=C1, class_weight=class_weights)
# svm.fit(data[:,3:5], data[:,0])
# print 'C1: %f, prec: %f, recall: %f, f-score: %f, acc: %f' % (C1, np.mean(precisions), np.mean(recalls), np.mean(fscores), np.mean(accs))
# print svm.coef_
# print svm.intercept_
# if np.mean(fscores) > best_fscore:
# print '*'
# best_fscore = np.mean(fscores)
# best_params = {
# 'C1': C1
# }
#print 'best:'
#print best_params
#svm = SVC(kernel='linear', C=best_params['C1'], class_weight=class_weights)
#svm.fit(data[:,1:3], data[:,0])
#print svm.coef_
#print svm.intercept_
#svm = SVC(kernel='linear', C=best_params['C1'], class_weight=class_weights)
#svm.fit(data[:,3:5], data[:,0])
#print svm.coef_
#print svm.intercept_
#svm = SVC()
#search = GridSearchCV(svm, {
# 'kernel': ('rbf',), 'C': [1,10,100,1000],
# 'gamma': [0.05, 0.1, 0.25, 0.128, 0.5, 1.0, 1.5],
# 'class_weight': ({1:1,-1:1},),
# }, cv=cv)
svm = LinearSVC()
search = GridSearchCV(svm, {
'C': np.logspace(0,4,15).tolist(),
'class_weight': (
{1:1,-1:1},
{1:1,-1:2},
{1:1,-1:3},
{1:1,-1:4},
{-1:1,1:2},
{-1:1,1:3},
{-1:1,1:5},{1:4,-1:1},{1:5,-1:1})
}, cv=cv, refit=False)
search.fit(data[:,3:5], data[:,0])
print search
print search.best_score_
print search.best_params_
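# Note: with refit=False, GridSearchCV does not expose best_estimator_, which is
# why a fresh LinearSVC is fitted with the selected parameters below.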
svm = LinearSVC(random_state=42,**search.best_params_)
svm.fit(data[:,1:3],data[:,0])
print svm.score(data[:,1:3],data[:,0])
print svm.coef_
print svm.intercept_
| gpl-3.0 |
vorasagar7/sp17-i524 | project/S17-IR-P001/code/ansible/ansible-node/files/visualization/FinalScript.py | 4 | 15339 | #Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import pandas as pd
from textblob import TextBlob
from time import strptime
import numpy as np
import re
import time
import zipcode
import sys, errno
from nltk.corpus import stopwords
from itertools import combinations
from collections import Counter
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import nltk
nltk.download('stopwords')
runCount=0
#Variables that contain the user credentials to access the Twitter API
access_token = ""
access_token_secret = ""
consumer_key = ""
consumer_secret = ""
tweets_data = []
stop = stopwords.words('english') + ['and']
emoticons_str = r"""
(?:
[:=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)
Count=0
stop = stopwords.words('english')
def create_dataframe(tweets_data):
tweets = pd.DataFrame(index=range(len(tweets_data)),
columns=['text','created_at','location','state','sentiment','sentiment_cat','country_code','hour'])
for i in range(len(tweets_data)):
try:
tweets['text'][i] = tweets_data[i]['text']
except:
tweets['text'][i] = ""
try:
tweets['location'][i]=tweets_data[i]['user']['location']
except:
tweets['location'][i]='NA'
try:
tweets['country_code'][i]=tweets_data[i]['place']['country_code']
except:
tweets['country_code'][i]=''
try:
lon=tweets_data[i]['place']['bounding_box']['coordinates'][0][0][0]
except:
lon='NA'
try:
lat=tweets_data[i]['place']['bounding_box']['coordinates'][0][0][1]
except:
lat='NA'
#print (lat,lon)
try:
tweets['created_at'][i]=tweets_data[i]['created_at']
except:
tweets['created_at'][i]='NA'
try:
tweets['hour'][i]=tweets['created_at'][i][11:13]
except:
tweets['hour'][i]='NA'
try:
stateFromData=tweets['location'][i].split(',')[1]
except:
stateFromData=''
if len(stateFromData)==2:
tweets['state'][i]=stateFromData
else:
if lat!='NA':
radius=10
incre=10
zips=zipcode.isinradius((lat,lon),radius)
while len(zips)==0:
radius=radius+incre
zips=zipcode.isinradius((lat,lon),radius)
incre=incre+10
myzip = zipcode.isequal(str(zips[0].zip))
tweets['state'][i]=myzip.state
else:
tweets['state'][i]='NA'
blob = TextBlob(tweets['text'][i])
try:
sentence=blob.sentences[0]
tweets['sentiment'][i]=float(sentence.sentiment.polarity)
except:
tweets['sentiment'][i]=0
if tweets['sentiment'][i] < 0:
tweets['sentiment_cat'][i] = 'Neg'
elif tweets['sentiment'][i] > 0:
tweets['sentiment_cat'][i] = 'Pos'
else:
tweets['sentiment_cat'][i] = 'Neu'
print (tweets.head())
return tweets
def state_senti(newFolder,usStateSentiOld,tweetsFinal):
output2=pd.DataFrame({'value' : tweetsFinal.groupby( [ "State","sentiment_cat"] ).size()}).reset_index()
outData=pd.pivot_table(output2,values='value', index=['State'], columns=['sentiment_cat'], aggfunc=np.sum)
outData=outData.fillna(0)
outData['State']=outData.index
#outData.reset_index()
print (outData.columns.values)
outData = pd.merge(usStateSentiOld, outData, how='left', left_on='State', right_on = 'State')
outData=outData.fillna(0)
outData['Pos']=outData['Pos_x']+outData['Pos_y']
del outData['Pos_x']
del outData['Pos_y']
outData['Neg']=outData['Neg_x']+outData['Neg_y']
del outData['Neg_x']
del outData['Neg_y']
outData['Neu']=outData['Neu_x']+outData['Neu_y']
del outData['Neu_x']
del outData['Neu_y']
outData.to_csv(newFolder+"usStates-SentiCount.csv",index=False)
#-------------------------------------------
try:
outData['sum']=outData[['Neg', 'Neu', 'Pos']].sum(axis=1)
outData['max']=outData['maxFinal']=outData[['Neg', 'Neu', 'Pos']].idxmax(axis=1)
except:
outData['sum']=outData[['Neu', 'Pos']].sum(axis=1)
outData['max']=outData['maxFinal']=outData[[ 'Neu', 'Pos']].idxmax(axis=1)
#-------------------------------------------
for i in range(len(outData)):
if outData['max'][i] == "Pos":
outData['maxFinal'][i] = '1'
elif outData['max'][i] == "Neu":
outData['maxFinal'][i] = '-1'
else:
outData['maxFinal'][i] = '2'
del outData['max']
d="var data =[\n"
for i in range(len(outData)):
row=outData.ix[i]
#print (row)
d += "[\'"+row['State']+"\',"+",".join([str(i) for i in row[:5]])+"],\n"
return d+']'
def create_timechart(newFolder,oldtimedata,tweets):
td1 = pd.DataFrame({'value' : tweets.groupby( [ "created_at"] ).size()}).reset_index()
td1['created_at'] = td1['created_at'].astype('str')
mask = (td1['created_at'].str.len() > 2)
td1=td1.loc[mask]
timedata = td1[td1.created_at != 'NA']
timedata=oldtimedata.append(timedata, ignore_index=True)
timedata.to_csv(newFolder+"timeseries.csv",index=False)
data1 ={}
data = ["var data=["]
for i in range(0,len(timedata)):
year = timedata['created_at'][i][-4:]
month_map = {'Jan': '1', 'Feb': '2', 'Mar': '3', 'Apr': '4', 'May': '5', 'Jun': '6',
'Jul': '7', 'Aug': '8', 'Sep': '9', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
mon = month_map.get(timedata['created_at'][i][4:7], '12')
date = timedata['created_at'][i][7:10]
hour = timedata['created_at'][i][10:13]
minu = timedata['created_at'][i][14:16]
sec = timedata['created_at'][i][17:20]
value = timedata['value'][i]
data1 = ("[Date.UTC("+str(year)+","+str(mon)+","+str(date)+","+str(hour)+","+str(minu)+","+str(sec)+"),"+str(value)+"]")
if (len(timedata)):
data.append(data1)
data = ",\n".join(data)+"\n]"
data = data.replace("[,","[")
return data
def tokenize(s):
tokens=tokens_re.findall(s)
return [ x for x in tokens if 'http' not in x and len(x)>1 and x.lower() not in stop]
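# (Illustrative example, not part of the original script: tokenize("Check http://t.co/x #nlp :)")
# first matches ['Check', 'http://t.co/x', '#nlp', ':)'] and the filter above then
# drops the URL, returning ['Check', '#nlp', ':)'].)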
def preprocess(s, lowercase=True):
tokens = tokenize(s)
if lowercase:
tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]
return tokens
def collect_pairs(lines):
pair_counter = Counter()
for line in lines:
unique_tokens = sorted(set(line)) # exclude duplicates in same line and sort to ensure one word is always before other
combos = combinations(unique_tokens, 2)
pair_counter += Counter(combos)
return pair_counter
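# (Illustrative example, not part of the original script:
# collect_pairs([['a', 'b', 'c'], ['a', 'c']]) returns a Counter with
# ('a', 'c'): 2, ('a', 'b'): 1 and ('b', 'c'): 1.)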
#Co-occurrence:
def co_occur(tweets):
t2 = []
t1 =tweets['text']
for t in range(len(t1)):
t2.append(preprocess(t1[t]))
pairs = collect_pairs(t2)
top_pairs = pairs.most_common(200)
nodes={}
links=["\"links\":["]
count =0
len_top=len(top_pairs)
nptp = np.array(top_pairs)
maxtp = np.max(nptp[:,1])
for p in range(len(top_pairs)):
for i in range(2):
if top_pairs[p][0][i] not in nodes:
nodes[top_pairs[p][0][i]] = count
count+=1
link="{ \"source\":"+str(nodes[top_pairs[p][0][0]])+",\"target\":"+str(nodes[top_pairs[p][0][1]])+",\"value\":"+str(round(top_pairs[p][1]*10/maxtp))+"}"
links.append(link)
links=",\n".join(links)+"\n]"
links=links.replace("[,","[")
nodes = sorted(nodes.items(), key=lambda x: x[1])
nodes1=["\"nodes\":["]
for p in range(len(nodes)):
nodes1.append("{ \"name\":\""+nodes[p][0]+"\",\"group\":"+"0}")
nodes1=",\n".join(nodes1)+"\n]"
nodes1=nodes1.replace("[,","[")
return nodes1,links
def heatworldgrid(newFolder,worldOld,tweets):
contdata=pd.read_csv("continents.txt")
contdat=contdata.fillna("NA")
tweets['sentiment']=tweets['sentiment'].apply(pd.to_numeric)
#print (tweets.dtypes)
Countryhour=pd.DataFrame({'sentiment' : tweets.groupby( ["country_code","hour"] )['sentiment'].mean()}).reset_index()
final=pd.merge(Countryhour, contdata, how='left',left_on="country_code",right_on="country")
print (final.columns.values)
del final['country']
#del final['Unnamed: 0']
del final['country_code']
Conthour=pd.DataFrame({'sentiment' : final.groupby( ["continent","hour"] )['sentiment'].mean()}).reset_index()
Conthour = pd.merge(worldOld, Conthour, how='left', left_on=["continent","hour"] , right_on = ["continent","hour"] )
Conthour=Conthour.fillna(0)
Conthour['sentiment']=(Conthour['sentiment_x']*1000000+Conthour['sentiment_y']*10000)/(1010000)
del Conthour['sentiment_x']
del Conthour['sentiment_y']
Conthour.to_csv(newFolder+"Continent-hour-senti.csv",index=False)
minVal=min(Conthour['sentiment'])
maxVal=max(Conthour['sentiment'])
outputStr=""
uniqueCont= list(np.unique(Conthour['continent']))
outputStr+="var continent =["+",".join(["'"+i+"'" for i in uniqueCont])+"];\n"
numCont=len(uniqueCont)
numHour=24
outputStr+="var hour =["+",".join(["'"+str(i)+"'" for i in range(numHour)])+"];\n"
outMatrix=np.zeros(shape=(numCont,numHour))
outputStr+="var data=["
datastr=[]
for i in range(len(Conthour)):
continent=Conthour['continent'][i]
hour=Conthour['hour'][i]
contIndex=uniqueCont.index(continent)
outMatrix[contIndex][int(hour)]=Conthour['sentiment'][i]
for i in range(numCont):
for j in range(numHour):
datastr.append("["+str(j)+","+str(i)+","+str(int(outMatrix[i][j]))+"]")
outputStr+=",".join(datastr)+"]; var minval = "+str(minVal)+";\n var maxval = "+str(maxVal)+";"
return outputStr
def createwordcloud(tweets):
# Read the whole text.
#text = open(path.join(d, 'constitution.txt')).read()
textpos = tweets[tweets.sentiment_cat == 'Pos']
textneg = tweets[tweets.sentiment_cat == 'Neg']
postweets=""
for i in textpos.index.values:
postweets+=textpos['text'][i]+" "
negtweets=""
for i in textneg.index.values:
negtweets+=textneg['text'][i]+" "
textp = preprocess(postweets)
textp=" ".join(textp)
textn = preprocess(negtweets)
textn=" ".join(textn)
wordcloudp = WordCloud( stopwords=stop,background_color='white',width=1200,height=1000).generate(textp)
wordcloudn = WordCloud( stopwords=stop,background_color='white', width=1200,height=1000).generate(textn)
image1 = wordcloudp.to_image()
image2= wordcloudn.to_image()
image1.save("wordcloup.png")
image2.save("wordcloudn.png")
def analyze(tweets_data):
oldFolder="Data\\"
outputFolder="OutputJS\\"
newFolder="NewData\\"
#A DataFrame is created from the list of JSON tweets; sentiment is also calculated
tweets=create_dataframe(tweets_data)
statedata=pd.read_csv(oldFolder+"states.csv")
tweetsFinal=pd.merge(tweets, statedata, how='left',left_on="state",right_on="Abbreviation")
#UsStatewise Tweets
usStateOld=pd.read_csv(oldFolder+"usStatesCount.csv")
usState=pd.DataFrame({'value' : tweetsFinal.groupby( [ "State"] ).size()}).reset_index()
usState_new = pd.merge(usStateOld, usState, how='left', left_on='State', right_on = 'State')
usState_new=usState_new.fillna(0)
usState_new['value']=usState_new['value_x']+usState_new['value_y']
del usState_new['value_x']
del usState_new['value_y']
usState_new.to_csv(newFolder+"usStatesCount.csv",index=False)
print (usState_new.head())
usStateJson=usState_new.to_json(orient = "records")
usStateJsonfinalOutput=usStateJson[33:len(usStateJson)-1].upper().replace("\"STATE\"","ucName").replace("\"VALUE\"","value")
with open('Final\\US_heat_count\\data.js', 'w') as outfile:
outfile.write(usStateJsonfinalOutput)
#UsStatewise Sentiment
usStateSentiOld=pd.read_csv(oldFolder+"usStates-SentiCount.csv")
statesentiout=state_senti(newFolder,usStateSentiOld,tweetsFinal)
with open('Final\\map-pies\\data.js', 'w') as outfile:
outfile.write(statesentiout)
#TimeSeries Chart
timeOld=pd.read_csv(oldFolder+"timeseries.csv")
timedata=create_timechart(newFolder,timeOld,tweets)
with open('Final\\dynamic-master-detail\\time_series.js', 'w') as outfile:
outfile.write(timedata)
#Co-occur Chart
nodes1,links=co_occur(tweets)
with open(outputFolder+'cooccur_word-1.json', 'w') as outfile:
outfile.write("{\n"+nodes1+",\n"+links+"}\n")
#Heat World Grid
worldOld=pd.read_csv(oldFolder+"Continent-hour-senti.csv")
heatjson=heatworldgrid(newFolder,worldOld,tweets)
with open('Final\\heatmap\\heatchart_data-1.js', 'w') as outfile:
outfile.write(heatjson)
#WordCloud
createwordcloud(tweets)
#This listener collects incoming tweets and runs the analysis pipeline once 1000 tweets have been received.
class StdOutListener(StreamListener):
def on_data(self, data):
global Count,tweets_data
Count+=1
#TweetCount+=1
if Count%1000==0:
print ("Analyze data started")
x=time.time()
analyze(tweets_data)
print ("Analyze data Completed in ", time.time()-x)
sys.exit(errno.EACCES)
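# (Note: sys.exit raises SystemExit, so the script stops after the first batch of
# 1000 tweets has been analyzed; the two reset statements below are only reached
# if that exception is swallowed somewhere upstream.)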
tweets_data=[]
Count=0
tweet = json.loads(data)
tweets_data.append(tweet)
return True
def on_error(self, status):
print (status)
if __name__ == '__main__':
#This handles Twitter authentication and the connection to the Twitter Streaming API
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
#This line filters the Twitter stream to English tweets that match the broad track terms 'a', 'e', 'i', 'o', 'u' and '#'
stream.filter(languages=["en"],track=['a', 'e', 'i','o','u','#']) | apache-2.0 |
mikekestemont/ruzicka | code/04latin_test_o2.py | 1 | 3340 | from __future__ import print_function
import os
import time
import json
import pickle
import sys
from itertools import product, combinations
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from ruzicka.utilities import binarize
from ruzicka.vectorization import Vectorizer
from ruzicka.utilities import load_pan_dataset, train_dev_split, get_vocab_size
from sklearn.cross_validation import train_test_split
from ruzicka.score_shifting import ScoreShifter
from ruzicka.evaluation import pan_metrics
from ruzicka.Order2Verifier import Order2Verifier as Verifier
import ruzicka.art as art
# run script for top-5 metrics
ngram_type = 'word'
ngram_size = 1
base = 'profile'
vector_space = 'tf_std'
metric = 'cosine'
nb_bootstrap_iter = 100
rnd_prop = 0.5
nb_imposters = 30
mfi = sys.maxint
min_df = 2
# get imposter data:
train_data, _ = load_pan_dataset('../data/latin/dev') # ignore unknown documents
train_labels, train_documents = zip(*train_data)
# get test data:
test_data, _ = load_pan_dataset('../data/latin/test') # ignore unknown documents
test_labels, test_documents = zip(*test_data)
# fit encoder for author labels:
label_encoder = LabelEncoder()
label_encoder.fit(train_labels+test_labels)
train_ints = label_encoder.transform(train_labels)
test_ints = label_encoder.transform(test_labels)
# fit vectorizer:
vectorizer = Vectorizer(mfi = mfi,
vector_space = vector_space,
ngram_type = ngram_type,
ngram_size = ngram_size)
vectorizer.fit(train_documents+test_documents)
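# Note: the vectorizer vocabulary is fitted on the development and test documents
# together, so both sets share one feature space.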
train_X = vectorizer.transform(train_documents).toarray()
test_X = vectorizer.transform(test_documents).toarray()
cols = ['label']
for test_author in sorted(set(test_ints)):
auth_label = label_encoder.inverse_transform([test_author])[0]
cols.append(auth_label)
proba_df = pd.DataFrame(columns=cols)
for idx in range(len(test_documents)):
target_auth = test_ints[idx]
target_docu = test_X[idx]
non_target_test_ints = np.array([test_ints[i] for i in range(len(test_ints)) if i != idx])
non_target_test_X = np.array([test_X[i] for i in range(len(test_ints)) if i != idx])
tmp_train_X = np.vstack((train_X, non_target_test_X))
tmp_train_y = np.hstack((train_ints, non_target_test_ints))
tmp_test_X, tmp_test_y = [], []
for t_auth in sorted(set(test_ints)):
tmp_test_X.append(target_docu)
tmp_test_y.append(t_auth)
# fit the verifier:
verifier = Verifier(metric = metric,
base = base,
nb_bootstrap_iter = nb_bootstrap_iter,
rnd_prop = rnd_prop)
verifier.fit(tmp_train_X, tmp_train_y)
probas = verifier.predict_proba(test_X = tmp_test_X,
test_y = tmp_test_y,
nb_imposters = nb_imposters)
row = [label_encoder.inverse_transform([target_auth])[0]] # author label
row += list(probas)
print(row)
proba_df.loc[len(proba_df)] = row
proba_df = proba_df.set_index('label')
# write away score tables:
table_dir = '../output/tables/'
if not os.path.isdir(table_dir):
os.mkdir(table_dir)
proba_df.to_csv(table_dir+'lat_proba_'+metric+'_'+vector_space+'.csv')
| mit |
ndingwall/scikit-learn | sklearn/dummy.py | 5 | 21753 | # Author: Mathieu Blondel <[email protected]>
# Arnaud Joly <[email protected]>
# Maheshakya Wijewardena <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .base import MultiOutputMixin
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted, _check_sample_weight
from .utils.random import _random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
from .utils.validation import _deprecate_positional_args
class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
.. versionadded:: 0.13
Parameters
----------
strategy : {"stratified", "most_frequent", "prior", "uniform", \
"constant"}, default="prior"
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "prior": always predicts the class that maximizes the class prior
(like "most_frequent") and ``predict_proba`` returns the class prior.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class
.. versionchanged:: 0.24
The default value of `strategy` has changed to "prior" in version
0.24.
random_state : int, RandomState instance or None, default=None
Controls the randomness to generate the predictions when
``strategy='stratified'`` or ``strategy='uniform'``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
constant : int or str or array-like of shape (n_outputs,)
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list thereof
Class labels for each output.
n_classes_ : int or list of int
Number of label for each output.
class_prior_ : ndarray of shape (n_classes,) or list thereof
Probability of each class for each output.
n_outputs_ : int
Number of outputs.
sparse_output_ : bool
True if the array returned from predict is to be in sparse CSC format.
Is automatically set to True if the input y is passed in sparse format.
Examples
--------
>>> import numpy as np
>>> from sklearn.dummy import DummyClassifier
>>> X = np.array([-1, 1, 1, 1])
>>> y = np.array([0, 1, 1, 1])
>>> dummy_clf = DummyClassifier(strategy="most_frequent")
>>> dummy_clf.fit(X, y)
DummyClassifier(strategy='most_frequent')
>>> dummy_clf.predict(X)
array([1, 1, 1, 1])
>>> dummy_clf.score(X, y)
0.75
"""
@_deprecate_positional_args
def __init__(self, *, strategy="prior", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
"""Fit the random classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
"""
allowed_strategies = ("most_frequent", "stratified", "uniform",
"constant", "prior")
if self.strategy not in allowed_strategies:
raise ValueError("Unknown strategy type: %s, expected one of %s."
% (self.strategy, allowed_strategies))
self._strategy = self.strategy
if self._strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn('A local copy of the target data has been converted '
'to a numpy array. Predicting on sparse target data '
'with the uniform strategy would not save memory '
'and would be slower.',
UserWarning)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.asarray(y)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
self.n_features_in_ = None # No input validation is done for X
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self._strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
(self.classes_,
self.n_classes_,
self.class_prior_) = class_distribution(y, sample_weight)
if self._strategy == "constant":
for k in range(self.n_outputs_):
if not any(constant[k][0] == c for c in self.classes_[k]):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
err_msg = ("The constant target value must be present in "
"the training data. You provided constant={}. "
"Possible values are: {}."
.format(self.constant, list(self.classes_[k])))
raise ValueError(err_msg)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self._strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self._strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self._strategy == "stratified":
class_prob = class_prior_
elif self._strategy == "uniform":
raise ValueError("Sparse target prediction is not "
"supported with the uniform strategy")
elif self._strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = _random_choice_csc(n_samples, classes_, class_prob,
self.random_state)
else:
if self._strategy in ("most_frequent", "prior"):
y = np.tile([classes_[k][class_prior_[k].argmax()] for
k in range(self.n_outputs_)], [n_samples, 1])
elif self._strategy == "stratified":
y = np.vstack([classes_[k][proba[k].argmax(axis=1)] for
k in range(self.n_outputs_)]).T
elif self._strategy == "uniform":
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self._strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list thereof
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self._strategy == "most_frequent":
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self._strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self._strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
out = out.astype(np.float64)
elif self._strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self._strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, object with finite length or shape}
Training data, requires length = n_samples
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list thereof
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
def _more_tags(self):
return {
'poor_score': True, 'no_validation': True,
'_xfail_checks': {
'check_methods_subset_invariance':
'fails for the predict method',
'check_methods_sample_order_invariance':
'fails for the predict method'
}
}
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since DummyClassifier
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
.. versionadded:: 0.13
Parameters
----------
strategy : {"mean", "median", "quantile", "constant"}, default="mean"
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "quantile": always predicts a specified quantile of the training set,
provided with the quantile parameter.
* "constant": always predicts a constant value that is provided by
the user.
constant : int or float or array-like of shape (n_outputs,), default=None
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
quantile : float in [0.0, 1.0], default=None
The quantile to predict using the "quantile" strategy. A quantile of
0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
maximum.
Attributes
----------
constant_ : ndarray of shape (1, n_outputs)
Mean or median or quantile of the training targets or constant value
given by the user.
n_outputs_ : int
Number of outputs.
Examples
--------
>>> import numpy as np
>>> from sklearn.dummy import DummyRegressor
>>> X = np.array([1.0, 2.0, 3.0, 4.0])
>>> y = np.array([2.0, 3.0, 5.0, 10.0])
>>> dummy_regr = DummyRegressor(strategy="mean")
>>> dummy_regr.fit(X, y)
DummyRegressor()
>>> dummy_regr.predict(X)
array([5., 5., 5., 5.])
>>> dummy_regr.score(X, y)
0.0
"""
@_deprecate_positional_args
def __init__(self, *, strategy="mean", constant=None, quantile=None):
self.strategy = strategy
self.constant = constant
self.quantile = quantile
def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
"""
allowed_strategies = ("mean", "median", "quantile", "constant")
if self.strategy not in allowed_strategies:
raise ValueError("Unknown strategy type: %s, expected one of %s."
% (self.strategy, allowed_strategies))
y = check_array(y, ensure_2d=False)
self.n_features_in_ = None # No input validation is done for X
if len(y) == 0:
raise ValueError("y must not be empty.")
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=50.)
for k in range(self.n_outputs_)]
elif self.strategy == "quantile":
if self.quantile is None or not np.isscalar(self.quantile):
raise ValueError("Quantile must be a scalar in the range "
"[0.0, 1.0], but got %s." % self.quantile)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=percentile)
for k in range(self.n_outputs_)]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = check_array(self.constant,
accept_sparse=['csr', 'csc', 'coo'],
ensure_2d=False, ensure_min_samples=0)
if self.n_outputs_ != 1 and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = self.constant
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
def predict(self, X, return_std=False):
"""
Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
All zeros in this case.
.. versionadded:: 0.20
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
Standard deviation of predictive distribution of query points.
"""
check_is_fitted(self)
n_samples = _num_samples(X)
y = np.full((n_samples, self.n_outputs_), self.constant_,
dtype=np.array(self.constant_).dtype)
y_std = np.zeros((n_samples, self.n_outputs_))
if self.n_outputs_ == 1:
y = np.ravel(y)
y_std = np.ravel(y_std)
return (y, y_std) if return_std else y
def _more_tags(self):
return {'poor_score': True, 'no_validation': True}
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum().
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since DummyRegressor
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
| bsd-3-clause |
yasirkhan380/Tutorials | notebooks/fig_code/svm_gui.py | 47 | 11549 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('key_press_event', self.onkeypress)
canvas.mpl_connect('key_release_event', self.onkeyrelease)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.shift_down = False
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onkeypress(self, event):
if event.key == "shift":
self.shift_down = True
def onkeyrelease(self, event):
if event.key == "shift":
self.shift_down = False
def onclick(self, event):
if event.xdata and event.ydata:
if self.shift_down or event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
elif event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
maxlikely/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 44 | 7031 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
"""Test partial dependence for classifier """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
"""Test partial dependence for multi-class classifier """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
"""Test partial dependence for regressor """
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
"""Test input validation of partial dependence. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
"""Test partial dependence plot function. """
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
"""Test partial dependence plot function input checks. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
"""Test partial dependence plot function on multi-class input. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
hrjn/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexes/multi/test_contains.py | 2 | 3306 | import numpy as np
import pytest
from pandas.compat import PYPY
import pandas as pd
from pandas import MultiIndex
import pandas.util.testing as tm
def test_contains_top_level():
midx = MultiIndex.from_product([["A", "B"], [1, 2]])
assert "A" in midx
assert "A" not in midx._engine
def test_contains_with_nat():
# MI with a NaT
mi = MultiIndex(
levels=[["C"], pd.date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
assert ("C", pd.Timestamp("2012-01-01")) in mi
for val in mi.values:
assert val in mi
def test_contains(idx):
assert ("foo", "two") in idx
assert ("bar", "two") not in idx
assert None not in idx
@pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_pypy():
idx = MultiIndex.from_arrays([["foo", "bar"], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([("bar", np.nan)]), np.array([False, True]))
tm.assert_numpy_array_equal(
idx.isin([("bar", float("nan"))]), np.array([False, True])
)
def test_isin():
values = [("foo", 2), ("bar", 3), ("quux", 4)]
idx = MultiIndex.from_arrays([["qux", "baz", "foo", "bar"], np.arange(4)])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = MultiIndex.from_arrays([[], []])
result = idx.isin(values)
assert len(result) == 0
assert result.dtype == np.bool_
@pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_not_pypy():
idx = MultiIndex.from_arrays([["foo", "bar"], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([("bar", np.nan)]), np.array([False, False]))
tm.assert_numpy_array_equal(
idx.isin([("bar", float("nan"))]), np.array([False, False])
)
def test_isin_level_kwarg():
idx = MultiIndex.from_arrays([["qux", "baz", "foo", "bar"], np.arange(4)])
vals_0 = ["foo", "bar", "quux"]
vals_1 = [2, 3, 10]
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))
msg = "Too many levels: Index has only 2 levels, not 6"
with pytest.raises(IndexError, match=msg):
idx.isin(vals_0, level=5)
msg = "Too many levels: Index has only 2 levels, -5 is not a valid level number"
with pytest.raises(IndexError, match=msg):
idx.isin(vals_0, level=-5)
with pytest.raises(KeyError, match=r"'Level 1\.0 not found'"):
idx.isin(vals_0, level=1.0)
with pytest.raises(KeyError, match=r"'Level -1\.0 not found'"):
idx.isin(vals_1, level=-1.0)
with pytest.raises(KeyError, match="'Level A not found'"):
idx.isin(vals_1, level="A")
idx.names = ["A", "B"]
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level="A"))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level="B"))
with pytest.raises(KeyError, match="'Level C not found'"):
idx.isin(vals_1, level="C")
| apache-2.0 |
OpenTrading/OpenTrader | setup.py | 1 | 2533 | #!/usr/bin/env python
import codecs
import os
import sys
import glob
from setuptools import setup, find_packages
try:
# http://stackoverflow.com/questions/21698004/python-behave-integration-in-setuptools-setup-py
from setuptools_behave import behave_test
except ImportError:
behave_test = None
dirname = os.path.dirname(__file__)
long_description = (
codecs.open(os.path.join(dirname, "README.creole"), encoding="utf-8").read() + "\n"
)
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"packages": ["zmq"], "excludes": ["tkinter"]}
setup(
name="OpenTrader",
description="OpenTrader",
long_description=long_description,
author="Open Trading",
license="LGPL2 license",
url="https://www.github.com/OpenTrading/OpenTrader",
version='1.0',
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: LGPL2 License",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Topic :: Office/Business :: Financial :: Investment",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 2",
] + [("Programming Language :: Python :: %s" % x) for x in "2.6 2.7".split()],
install_requires=[
"configobj",
"pandas",
"pyparsing",
# we'll make zmq default now
"zmq",
],
extras_require={'plotting': ["matplotlib"],
'pybacktest': ["pybacktest"],
'rabbit': ["pyrabbit"],
'doc': ["python-creole", "invoke"],
# we'll make zmq default now
# 'zmq': ["zmq"],
'amqp': ["pika"],
},
data_files=[('', ['README.creole']),
('OpenTrader', glob.glob('OpenTrader/*.ini')),
('OpenTrader/Omlettes', glob.glob('OpenTrader/Omlettes/*.ini'))],
options = {"build_exe": build_exe_options},
entry_points={
"console_scripts": [
"OTCmd2 = OpenTrader.OTCmd2:iMain",
"OTBackTest = OpenTrader.OTBackTest:iMain",
"OTPpnAmgc = OpenTrader.OTPpnAmgc:iMain",
]
},
tests_require=["behave>=1.2.5"],
cmdclass=behave_test and {"behave_test": behave_test,} or {},
packages=find_packages(),
include_package_data=True,
zip_safe=False,
)
| lgpl-3.0 |
WangWenjun559/Weiss | summary/sumy/sklearn/feature_selection/rfe.py | 1 | 17079 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
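        # e.g. with 100 features, step=0.1 removes int(0.1 * 100) = 10 features
        # per iteration, while step=5 removes up to 5 features per iteration
        # (illustrative comment added for clarity)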
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
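    For example, with 10 features and step=1 this gives ceil(9 / 1) + 1 = 10
    entries, one per candidate number of selected features (worked example
    added for clarity).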
Examples
--------
    The following example shows how to retrieve the 5 informative features,
    not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
| apache-2.0 |
alekz112/statsmodels | docs/source/plots/graphics_gofplots_qqplot.py | 38 | 1911 | # -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
| bsd-3-clause |
Djabbz/scikit-learn | benchmarks/bench_plot_nmf.py | 90 | 5742 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random')
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
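    # The loop below applies the Lee & Seung multiplicative update rules,
    # written out here for clarity (eps guards against division by zero):
    #   H <- H * (W^T V) / (W^T W H + eps)
    #   W <- W * (V H^T) / (W (H H^T) + eps)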
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
        fig = plt.figure('scikit-learn Non-Negative Matrix Factorization'
                         ' benchmark results')
ax = fig.gca(projection='3d')
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
JeanKossaifi/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
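    # e.g. the default fetch_lfw_people slice_ (70:195, 78:172) combined with
    # resize=0.5 yields 62 x 47 pixel faces (illustrative comment, not in the
    # original)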
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with at least
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
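    # Each pair spec is tab separated: 3 fields ("name idx1 idx2") describe two
    # pictures of the same person, 4 fields ("name1 idx1 name2 idx2") pictures
    # of two different people -- this is what the length checks below
    # distinguish (explanatory comment added; layout inferred from the parsing)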
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
iainr/fridgid | Fridge.py | 1 | 9372 | import RPi.GPIO as GPIO
import datetime
import time
import pandas as pd
import logging
import logging.handlers
import sys
logger = logging.getLogger('fridge')
handler = logging.StreamHandler()
fHandler = logging.FileHandler('fridge.log')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
fHandler.setFormatter(formatter)
logger.addHandler(handler)
logger.addHandler(fHandler)
logger.setLevel(logging.DEBUG)
logging.captureWarnings(True)
dataLog = logging.getLogger('fridge.data')
dataFormatter = logging.Formatter("%(asctime)s, %(message)s", "%Y-%m-%d %H:%M:%S")
dataFileName = 'fridge-' + str(datetime.datetime.now()) + '.data'
dataHandler = logging.handlers.RotatingFileHandler(dataFileName, mode='w', maxBytes=10000, backupCount=2)
dataHandler.setFormatter(dataFormatter)
dataLog.addHandler(dataHandler)
dataLog.setLevel(logging.INFO)
class Fridge:
def __init__(self, heaterGpio, coolerGpio, ambientTempSensorRomCode):
self.initGpio(heaterGpio, coolerGpio)
self.heater = TemperatureElement(heaterGpio, name='heater')
self.cooler = TemperatureElement(coolerGpio, name='cooler')
self.ambientTempSensor = DS18B20(ambientTempSensorRomCode, name='TempSens')
self.resultPeriod = datetime.timedelta(minutes=10)
self.maxResults = 1000
self.lastResultTime = None
self.resultTime = datetime.datetime.now()
self.resultsFile = 'results.txt'
fo = open(self.resultsFile, 'w')
fo.close()
def initGpio(self, heaterGpioPin, coolerGpioPin):
GPIO.setmode(GPIO.BCM)
GPIO.setup(heaterGpioPin, GPIO.OUT)
GPIO.setup(coolerGpioPin, GPIO.OUT)
def updateResultsLog(self, dataFile):
if datetime.datetime.now() >= self.resultTime:
now = datetime.datetime.now()
names = ['date', 'set', 'meas', 'heater', 'cooler']
d = pd.read_csv(dataFile, names=names)
d['date'] = pd.to_datetime(d['date'])
d['error'] = d.meas - d.set
d['absError'] = d['error'].abs()
if self.lastResultTime == None:
dt = d
else:
start = self.lastResultTime
end = self.resultTime
mask = (d['date'] > start) & (d['date'] <= end)
dt = d.loc[mask]
mean = dt.meas.mean()
maxErr = dt.error.max()
minErr = dt.error.min()
meanErr = dt.error.mean()
meanAbsErr = dt.absError.mean()
set = d['set'].iloc[-1]
names = ['date', 'set', 'mean', 'maxErr', 'minErr', 'meanErr', 'meanAbsErr']
d_r = pd.read_csv(self.resultsFile, names=names)
            resBefore = []
            try:
                fi = open(self.resultsFile, 'r')
                resBefore = fi.read().split('\n')
                fi.close()
            except IOError:
                # no previous results could be read; keep resBefore empty
                pass
fo = open(self.resultsFile, 'w')
fo.write('{:11s}'.format('Date'))
fo.write('{:9s}'.format('Time'))
fo.write('{:5s}'.format('set'))
fo.write('{:5s}'.format('mean'))
fo.write('{:5s}'.format('maxE'))
fo.write('{:5s}'.format('minE'))
fo.write('{:6s}'.format('meanE'))
fo.write('{:9s}'.format('meanAbsE') + '\n')
fo.write( self.resultTime.strftime('%Y-%m-%d %H:%M:%S') + ' ' + '{:4.1f}'.format(set) + ' ' + '{:4.1f}'.format(mean) + ' ' + '{:4.1f}'.format(maxErr) + ' ' + '{:4.1f}'.format(minErr) + ' ' + '{:5.1f}'.format(meanErr) + ' ' + '{:8.1f}'.format(meanAbsErr) + '\n' )
if len(resBefore) >= 2:
for i in xrange(1, len(resBefore)-1, 1):
fo.write(resBefore[i] + '\n')
if i > self.maxResults:
break
fo.close()
self.lastResultTime = self.resultTime
self.resultTime = now + self.resultPeriod
class TemperatureElement:
def __init__(self, bcmGpioNum, name='Name'):
self.name = name
self.gpioPin = bcmGpioNum
self.on = None
self.lastOnTime = None
self.minOnTime = datetime.timedelta(minutes=1)
self.minOffTime = datetime.timedelta(minutes=3)
try:
GPIO.output(self.gpioPin, False)
self.lastOffTime = datetime.datetime.now()
except:
logger.error('Failed to switch off in temp el init')
raise
def isOn(self):
if(GPIO.input(self.gpioPin)):
return True
else:
return False
def status(self):
if(GPIO.input(self.gpioPin)):
try:
onFor = str(datetime.datetime.now()-self.lastOnTime).split('.')[0]
except:
onFor = 'No Last On Time'
logger.debug(self.name + " been ON for " + onFor)
return self.name + " ON for " + onFor
else:
try:
offFor = str(datetime.datetime.now()-self.lastOffTime).split('.')[0]
except:
offFor = 'No Last Off Time'
logger.debug(self.name +" been OFF for " + offFor)
return self.name +" OFF for " + offFor
def turnOff(self):
now = datetime.datetime.now()
switchOff = False
#if not been on/off yet then can switch off
if self.on == None:
switchOff = True
#if not been on yet, and not currently off then can switch off
elif self.lastOnTime == None and self.on != False:
switchOff = True
#if on, and have been on for at least minOnTime then can switch off
elif self.on == True:
if (now - self.lastOnTime) > self.minOnTime:
switchOff = True
else:
logger.debug(self.name + ' Unable to switch off. Min On Time not met' )
elif self.on == False:
switchOff = False # Already off
else:
logger.debug(self.name + ' Unable to switch off. Valid condition not found.' )
#Switch on if have decided to
if switchOff == True:
try:
GPIO.output(self.gpioPin, False)
self.lastOffTime = now
self.on = False
logger.debug(self.name + ' Switched Off Return 1' )
return 1
except:
logger.debug(self.name + ' Exception Return -1' )
raise
return -1
else:
logger.debug(self.name + ' No Change Return 0.' )
return 0
def turnOn(self):
now = datetime.datetime.now()
switchOn = False
#if not been on/off yet then can switch on
if self.on == None:
switchOn = True
#if not been off yet, and not currently on then can switch on
elif self.lastOffTime == None and self.on != True:
switchOn = True
#if off, and have been off for at least minOffTime then can switch on
elif self.on == False:
if (now - self.lastOffTime) > self.minOffTime:
switchOn = True
else:
logger.debug(self.name + ' Unable to switch on. Min Off Time not met' )
elif self.on == True:
            switchOn = False # Already on
else:
logger.debug(self.name + ' Unable to switch on. Valid condition not found.' )
#Switch on if have decided to
if switchOn == True:
try:
GPIO.output(self.gpioPin, True)
self.lastOnTime = now
self.on = True
logger.debug(self.name + ' Switched On Return 1' )
return 1
except:
logger.debug(self.name + ' Exception Return -1' )
raise
return -1
else:
logger.debug(self.name + ' No Change Return 0' )
return 0
class DS18B20:
def __init__(self, romCode, name='Name'):
self.name = name
self.romCode = romCode
def getTemp(self):
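        # A typical w1_slave readout looks like (illustrative example,
        # hardware dependent):
        #   73 01 4b 46 7f ff 0d 10 41 : crc=41 YES
        #   73 01 4b 46 7f ff 0d 10 41 t=23187
        # The parsing below grabs the "t=..." field on the second line and
        # converts millidegrees Celsius to degrees.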
tempFile = open('/sys/bus/w1/devices/' + self.romCode + '/w1_slave')
tempText = tempFile.read()
tempFile.close()
tempData = tempText.split("\n")[1].split(" ")[9]
temp = float(tempData[2:]) / 1000
logger.debug(self.name + ' ' + str(temp))
return temp
heaterGpio = 6
coolerGpio = 5
tempSensRomCode='28-0316027c72ff'
fridge = Fridge(heaterGpio, coolerGpio, tempSensRomCode)
fridge.heater.minOffTime=datetime.timedelta(seconds=1)
fridge.heater.minOnTime=datetime.timedelta(seconds=1)
fridge.cooler.minOffTime=datetime.timedelta(minutes=3)
fridge.cooler.minOnTime=datetime.timedelta(minutes=1)
fridge.ambientTempSensor.getTemp()
samplePeriod = datetime.timedelta(seconds=10)
setTemp = 21
heaterOnHyst = 0.2 #Amount below set temp that heater is asked to switch on at
heaterOffHyst = 0.1 #Amount below set temp that heater is asked to switch off at
coolerOnHyst = 1.5 #Amount above set temp that cooler is asked to switch on at
coolerOffHyst = 1 #Amount above set temp that cooler is asked to switch off at
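# Worked example of the hysteresis bands above (comment added for clarity):
# with setTemp = 21 the heater turns on below 20.8 (21 - 0.2) and off above
# 20.9 (21 - 0.1); the cooler turns on above 22.5 (21 + 1.5) and off below
# 22.0 (21 + 1.0), leaving a dead band around the set point so the heater and
# cooler never fight each other.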
i=0
while True:
try:
i=i+1
loopStartTime = datetime.datetime.now()
temp = fridge.ambientTempSensor.getTemp()
logger.debug('i=' + str(i) + ' Error=' + str(temp-setTemp) + ' Temp=' + str(temp) + ' Set temp=' + str(setTemp))
temp = fridge.ambientTempSensor.getTemp()
fridge.heater.status()
fridge.cooler.status()
#Heater decision
#If heater not on and temp is below set - heaterOnHyst then try to switch on
if not fridge.heater.isOn():
if temp < (setTemp - heaterOnHyst):
fridge.heater.turnOn()
#If heater is on and temp above setTemp - heaetr OffHyst then try to switch off
if fridge.heater.isOn():
if temp > (setTemp - heaterOffHyst):
fridge.heater.turnOff()
#Cooler decision
#If cooler not on and temp is above set + coolerOnHyst then try to switch cooler on
if not fridge.cooler.isOn():
if temp > (setTemp + coolerOnHyst):
fridge.cooler.turnOn()
#If cooler is on and temp below setTemp + coolerOffHyst then try to switch off
if fridge.cooler.isOn():
if temp < (setTemp + coolerOffHyst):
fridge.cooler.turnOff()
dataLog.info('{}'.format(setTemp) + ', ' + '{}'.format(temp) + ', ' + str(fridge.heater.isOn()) + ', ' + '{}'.format(fridge.cooler.isOn()) )
fridge.updateResultsLog(dataFileName)
        # wait for the remainder of the sample period (sleep rather than busy-wait)
        while datetime.datetime.now() < (loopStartTime + samplePeriod):
            time.sleep(0.1)
except KeyboardInterrupt:
logger.info('Ctrl-c Exit.')
fridge.heater.turnOff()
fridge.cooler.turnOff()
sys.exit()
| lgpl-3.0 |
ptkool/spark | python/pyspark/sql/tests/test_pandas_udf.py | 12 | 10115 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from pyspark.sql.types import *
from pyspark.sql.utils import ParseException
from pyspark.rdd import PythonEvalType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
from py4j.protocol import Py4JJavaError
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class PandasUDFTests(ReusedSQLTestCase):
def test_pandas_udf_basic(self):
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid functionType'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
def test_stopiteration_in_udf(self):
def foo(x):
raise StopIteration()
def foofoo(x, y):
raise StopIteration()
exc_message = "Caught StopIteration thrown from user's code; failing the task"
df = self.spark.range(0, 100)
# plain udf (test for SPARK-23754)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn('v', udf(foo)('id')).collect
)
# pandas scalar udf
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.withColumn(
'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
).collect
)
# pandas grouped map
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').apply(
pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
# pandas grouped agg
self.assertRaisesRegexp(
Py4JJavaError,
exc_message,
df.groupBy('id').agg(
pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
).collect
)
def test_pandas_udf_detect_unsafe_type_conversion(self):
import pandas as pd
import numpy as np
values = [1.0] * 3
pdf = pd.DataFrame({'A': values})
df = self.spark.createDataFrame(pdf).repartition(1)
@pandas_udf(returnType="int")
def udf(column):
return pd.Series(np.linspace(0, 1, len(column)))
# Since 0.11.0, PyArrow supports the feature to raise an error for unsafe cast.
with self.sql_conf({
"spark.sql.execution.pandas.arrowSafeTypeConversion": True}):
with self.assertRaisesRegexp(Exception,
"Exception thrown when converting pandas.Series"):
df.select(['A']).withColumn('udf', udf('A')).collect()
# Disabling Arrow safe type check.
with self.sql_conf({
"spark.sql.execution.pandas.arrowSafeTypeConversion": False}):
df.select(['A']).withColumn('udf', udf('A')).collect()
def test_pandas_udf_arrow_overflow(self):
import pandas as pd
df = self.spark.range(0, 1)
@pandas_udf(returnType="byte")
def udf(column):
return pd.Series([128] * len(column))
# When enabling safe type check, Arrow 0.11.0+ disallows overflow cast.
with self.sql_conf({
"spark.sql.execution.pandas.arrowSafeTypeConversion": True}):
with self.assertRaisesRegexp(Exception,
"Exception thrown when converting pandas.Series"):
df.withColumn('udf', udf('id')).collect()
# Disabling safe type check, let Arrow do the cast anyway.
with self.sql_conf({"spark.sql.execution.pandas.arrowSafeTypeConversion": False}):
df.withColumn('udf', udf('id')).collect()
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
raghavrv/scikit-learn | examples/linear_model/plot_logistic.py | 73 | 1568 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# build a toy 1-D dataset: binary labels (y = 1 where X > 0), with
# Gaussian noise added to X
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
| bsd-3-clause |
perryjohnson/biplaneblade | sandia_blade_lib/prep_stn32_mesh.py | 1 | 10860 | """Write initial TrueGrid files for one Sandia blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run sandia_blade_lib/prep_stnXX_mesh.py
or
|> import sandia_blade_lib.prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 10, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
import lib.poly_utils as pu
from shapely.geometry import Polygon
# SET THESE PARAMETERS -----------------
station_num = 32
# --------------------------------------
plt.close('all')
# load the Sandia blade
m = bl.MonoplaneBlade('Sandia blade SNL100-00', 'sandia_blade')
# pre-process the station dimensions
station = m.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure for this station
st = station.structure
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
usc = st.spar_cap.layer['upper']
is1 = st.internal_surface_1.layer['resin']
points_usc = [
tuple(usc.left[0]), # SparCap_upper.txt
(usc.left[0][0], 0.1),
is1.polygon.interiors[0].coords[0], # InternalSurface1_resin.txt
tuple(usc.right[1]), # SparCap_upper.txt
(usc.right[1][0], 0.25),
(usc.left[0][0], 0.25)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
lsc = st.spar_cap.layer['lower']
points_lsc = [
tuple(lsc.left[1]),
(lsc.left[1][0], 0.0),
is1.polygon.interiors[0].coords[292-222], # InternalSurface1_resin.txt
tuple(lsc.right[0]), # SparCap_lower.txt
(lsc.right[0][0], -0.15),
(lsc.left[1][0], -0.15)
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# TE reinforcement, upper 1 ------------------------------------------------
label = 'TE reinforcement, upper 1'
# create the bounding polygon
ter = st.TE_reinforcement.layer['foam']
points_teu1 = [
(ter.top[0][0], 0.25), # TE_Reinforcement_foam.txt
tuple(ter.top[0]), # TE_Reinforcement_foam.txt
(0.47, 0.12),
is1.polygon.interiors[0].coords[457-222], # InternalSurface1_resin.txt
(is1.polygon.interiors[0].coords[457-222][0], 0.25) # InternalSurface1_resin.txt
]
bounding_polygon = Polygon(points_teu1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 1 ------------------------------------------------
label = 'TE reinforcement, lower 1'
# create the bounding polygon
points_tel1 = [
(ter.bottom[0][0], -0.15), # TE_Reinforcement_foam.txt
tuple(ter.bottom[1]), # TE_Reinforcement_foam.txt
(0.47, -0.01),
(0.7, 0.05),
points_teu1[-2], # InternalSurface1_resin.txt
(points_teu1[-1][0], -0.15) # InternalSurface1_resin.txt
]
bounding_polygon = Polygon(points_tel1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 2 ------------------------------------------------
label = 'TE reinforcement, upper 2'
# create the bounding polygon
is1t = st.internal_surface_1.layer['triax']
points_teu2 = [
points_teu1[-1],
points_teu1[-2],
is1t.polygon.interiors[0].coords[364-176], # InternalSurface1_triax.txt
is1t.polygon.exterior.coords[24-3], # InternalSurface1_triax.txt
(is1t.polygon.exterior.coords[24-3][0], 0.25) # InternalSurface1_triax.txt
]
bounding_polygon = Polygon(points_teu2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 2 ------------------------------------------------
label = 'TE reinforcement, lower 2'
# create the bounding polygon
points_tel2 = [
(points_teu2[0][0], -0.1),
points_teu2[1],
points_teu2[2],
points_teu2[3],
(points_teu2[3][0], -0.1)
]
bounding_polygon = Polygon(points_tel2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 3 ------------------------------------------------
label = 'TE reinforcement, upper 3'
# create the bounding polygon
teru = st.TE_reinforcement.layer['uniax']
est = st.external_surface.layer['triax']
esg = st.external_surface.layer['gelcoat']
points_teu3 = [
points_teu2[-1],
points_teu2[-2],
ter.polygon.exterior.coords[0],
teru.polygon.exterior.coords[0],
(est.polygon.exterior.coords[-1][0], 0.002),
est.polygon.exterior.coords[-2],
esg.polygon.exterior.coords[-2],
(esg.polygon.exterior.coords[-2][0], 0.25)
]
bounding_polygon = Polygon(points_teu3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 3 ------------------------------------------------
label = 'TE reinforcement, lower 3'
# create the bounding polygon
points_tel3 = [
(points_teu3[0][0], -0.1),
points_teu3[1],
points_teu3[2],
points_teu3[3],
points_teu3[4],
est.polygon.exterior.coords[-1],
esg.polygon.exterior.coords[-1],
(points_teu3[4][0], -0.1)
]
bounding_polygon = Polygon(points_tel3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# LE panel -----------------------------------------------------------------
label = 'LE panel'
# create the bounding polygon
lep = st.LE_panel.layer['foam']
is1 = st.internal_surface_1.layer['resin']
points_le = [
(-0.7,-0.1),
(lep.bottom[0][0],-0.1),
(lep.bottom[0][0],0.25),
(-0.7, 0.25)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# show the plot
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.spar_cap.layer['upper'],
st.spar_cap.layer['lower'],
st.LE_panel.layer['foam']
],
alt_TE_reinforcement=True,
soft_warning=False)
| gpl-3.0 |
wilseypa/warped2-models | scripts/plotBags.py | 1 | 12059 | #!/usr/bin/python
# Calculates statistics and plots the bag metrics from raw data
from __future__ import print_function
import csv
import os, sys
import numpy as np
import scipy as sp
import scipy.stats as sps
import pandas as pd
import re, shutil, tempfile
import itertools, operator
import subprocess
import Gnuplot
import Gnuplot.funcutils
###### Settings go here ######
searchAttrsList = [
{ 'groupby': ['Worker_Thread_Count', 'Static_Window_Size'],
'filter' : 'Fraction_of_Total_Window',
'model' : 'Model',
'lpcount': 'Number_of_Objects',
'output' : 'threads_vs_staticwindow_key_fractionwindow_' },
{ 'groupby': ['Worker_Thread_Count', 'Fraction_of_Total_Window'],
'filter' : 'Static_Window_Size',
'model' : 'Model',
'lpcount': 'Number_of_Objects',
'output' : 'threads_vs_fractionwindow_key_staticwindow_' }
]
'''
List of metrics available:
Event_Commitment_Ratio
Total_Rollbacks
Simulation_Runtime_(secs.)
Average_Memory_Usage_(MB)
Event_Processing_Rate_(per_sec)
Speedup_w.r.t._Sequential_Simulation
'''
metricList = [
{ 'name' : 'Event_Processing_Rate_(per_sec)',
'ystart': 0,
'yend' : 1000000,
'ytics' : 100000 },
{ 'name' : 'Simulation_Runtime_(secs.)',
'ystart': 0,
'yend' : 150,
'ytics' : 10 },
{ 'name' : 'Event_Commitment_Ratio',
'ystart': 1,
'yend' : 2,
'ytics' : 0.1 },
{ 'name' : 'Speedup_w.r.t._Sequential_Simulation',
'ystart': 0,
'yend' : 10,
'ytics' : 1 }
]
rawDataFileName = 'bags'
statType = [ 'Mean',
'CI_Lower',
'CI_Upper',
'Median',
'Lower_Quartile',
'Upper_Quartile'
]
###### Don't edit below here ######
def mean_confidence_interval(data, confidence=0.95):
# check the input is not empty
if not data:
raise RuntimeError('mean_ci - no data points passed')
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), sps.sem(a)
    h = se * sps.t.ppf((1 + confidence) / 2., n - 1)  # two-sided t critical value
return m, m-h, m+h
def median(data):
# check the input is not empty
if not data:
raise RuntimeError('median - no data points passed')
return np.median(np.array(data))
def quartiles(data):
# check the input is not empty
if not data:
raise RuntimeError('quartiles - no data points passed')
sorts = sorted(data)
    mid = len(sorts) // 2  # integer midpoint index
if (len(sorts) % 2 == 0):
# even
lowerQ = median(sorts[:mid])
upperQ = median(sorts[mid:])
else:
# odd
lowerQ = median(sorts[:mid]) # same as even
upperQ = median(sorts[mid+1:])
return lowerQ, upperQ
def statistics(data):
# check the input is not empty
if not data:
raise RuntimeError('statistics - no data points passed')
mean = ci_lower = ci_upper = med = lower_quartile = upper_quartile = data[0]
if len(data) > 1:
mean, ci_lower, ci_upper = mean_confidence_interval(data)
med = median(data)
lower_quartile, upper_quartile = quartiles(data)
statList = (str(mean), str(ci_lower), str(ci_upper), str(med), str(lower_quartile), str(upper_quartile))
return ",".join(statList)
def sed_inplace(filename, pattern, repl):
# For efficiency, precompile the passed regular expression.
pattern_compiled = re.compile(pattern)
# For portability, NamedTemporaryFile() defaults to mode "w+b" (i.e., binary
# writing with updating). In this case, binary writing imposes non-trivial
# encoding constraints resolved by switching to text writing.
with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
with open(filename) as src_file:
for line in src_file:
tmp_file.write(pattern_compiled.sub(repl, line))
# Overwrite the original file with the temporary file in a
# manner preserving file attributes (e.g., permissions).
shutil.copystat(filename, tmp_file.name)
shutil.move(tmp_file.name, filename)
def getIndex(aList, text):
'''Returns the index of the requested text in the given list'''
for i,x in enumerate(aList):
if x == text:
return i
def plot(data, fileName, title, subtitle, xaxisLabel, yaxisLabel, ystart, yend, ytics, linePreface):
# Replace '_' with ' '
g = Gnuplot.Gnuplot()
multiLineTitle = title.replace("_", " ") + '\\n '+ subtitle.replace("_", " ")
g.title(multiLineTitle)
g("set terminal svg noenhanced background rgb 'white' size 1000,800 fname 'Helvetica' fsize 16")
g("set key box outside center top horizontal font ',12' ")
g("set autoscale xy")
#g("set yrange [{0}:{1}]".format(unicode(ystart), unicode(yend)))
#g("set ytics {}".format(unicode(ytics)))
g("set grid")
g.xlabel(xaxisLabel.replace("_", " "))
g.ylabel(yaxisLabel.replace("_", " "))
g('set output "' + fileName + '"')
d = []
for key in sorted(data[statType[0]]):
result = Gnuplot.Data( data['header'][key],data[statType[0]][key],\
data[statType[1]][key],data[statType[2]][key],\
with_="yerrorlines",title=linePreface+key )
d.append(result)
g.plot(*d)
def plot_stats(dirPath, fileName, xaxisLabel, keyLabel, filterLabel, filterValue, model, lpCount):
# Read the stats csv
inFile = dirPath + 'stats/' + rawDataFileName + '/' + fileName + '.csv'
reader = csv.reader(open(inFile,'rb'))
header = reader.next()
# Get Column Values for use below
xaxis = getIndex(header, xaxisLabel)
kid = getIndex(header, keyLabel)
reader = sorted(reader, key=lambda x: int(x[xaxis]), reverse=False)
reader = sorted(reader, key=lambda x: x[kid], reverse=False)
for param in metricList:
metric = param['name']
ystart = param['ystart']
yend = param['yend']
ytics = param['ytics']
outData = {'header':{}}
# Populate the header
for kindex, kdata in itertools.groupby(reader, lambda x: x[kid]):
if kindex not in outData['header']:
outData['header'][kindex] = []
for xindex, data in itertools.groupby(kdata, lambda x: x[xaxis]):
outData['header'][kindex].append(xindex)
# Populate the statistical data
for stat in statType:
columnName = metric + '_' + stat
columnIndex = getIndex(header, columnName)
if stat not in outData:
outData[stat] = {}
for xindex, data in itertools.groupby(reader, lambda x: x[xaxis]):
for kindex, kdata in itertools.groupby(data, lambda x: x[kid]):
if kindex not in outData[stat]:
outData[stat][kindex] = []
value = [item[columnIndex] for item in kdata][0]
outData[stat][kindex].append(value)
# Plot the statistical data
title = model.upper() + ' model with ' + str("{:,}".format(lpCount)) + ' LPs'
subtitle = 'key = ' + keyLabel
outDir = dirPath + 'plots/' + rawDataFileName + '/'
outFile = outDir + fileName + "_" + metric + '.svg'
yaxisLabel = metric + '_(C.I._=_95%)'
plot(outData, outFile, title, subtitle, xaxisLabel, yaxisLabel, ystart, yend, ytics, '')
# Convert svg to pdf and delete svg
outPDF = outDir + fileName + "_" + metric + '.pdf'
subprocess.call(['inkscape', outFile, '--export-pdf', outPDF])
subprocess.call(['rm', outFile])
def calc_and_plot(dirPath):
# Load the sequential simulation time
seqFile = dirPath + 'sequential.dat'
if not os.path.exists(seqFile):
print('Sequential data not available')
sys.exit()
seqFp = open(seqFile, 'r')
seqCount, _, seqTime = seqFp.readline().split()
seqFp.close()
# Load data from csv file
inFile = dirPath + rawDataFileName + '.csv'
if not os.path.exists(inFile):
print(rawDataFileName.upper() + ' raw data not available')
sys.exit()
data = pd.read_csv(inFile, sep=',')
data['Event_Commitment_Ratio'] = \
data['Events_Processed'] / data['Events_Committed']
data['Total_Rollbacks'] = \
data['Primary_Rollbacks'] + data['Secondary_Rollbacks']
data['Event_Processing_Rate_(per_sec)'] = \
data['Events_Processed'] / data['Simulation_Runtime_(secs.)']
data['Speedup_w.r.t._Sequential_Simulation'] = \
float(seqTime) / data['Simulation_Runtime_(secs.)']
# Create the plots directory (if needed)
outDir = dirPath + 'plots/'
if not os.path.exists(outDir):
os.makedirs(outDir)
outName = outDir + rawDataFileName + '/'
subprocess.call(['rm', '-rf', outName])
subprocess.call(['mkdir', outName])
# Create the stats directory (if needed)
outDir = dirPath + 'stats/'
if not os.path.exists(outDir):
os.makedirs(outDir)
outName = outDir + rawDataFileName + '/'
subprocess.call(['rm', '-rf', outName])
subprocess.call(['mkdir', outName])
for searchAttrs in searchAttrsList:
groupbyList = searchAttrs['groupby']
filterName = searchAttrs['filter']
model = searchAttrs['model']
lpcount = searchAttrs['lpcount']
output = searchAttrs['output']
groupbyList.append(filterName)
# Read unique values for the filter
filterValues = data[filterName].unique().tolist()
# Read the model name and LP count
modelName = data[model].unique().tolist()
lpCount = data[lpcount].unique().tolist()
for filterValue in filterValues:
# Filter data for each filterValue
filteredData = data[data[filterName] == filterValue]
groupedData = filteredData.groupby(groupbyList)
columnNames = list(groupbyList)
# Generate stats
result = pd.DataFrame()
for param in metricList:
metric = param['name']
columnNames += [metric + '_' + x for x in statType]
stats = groupedData.apply(lambda x : statistics(x[metric].tolist()))
result = pd.concat([result, stats], axis=1)
# Write to the csv
fileName = output + str(filterValue)
outFile = outName + fileName + '.csv'
statFile = open(outFile,'w')
for colName in columnNames:
statFile.write(colName + ',')
statFile.write("\n")
statFile.close()
result.to_csv(outFile, mode='a', header=False, sep=',')
# Remove " from the newly created csv file
# Note: It is needed since pandas package has an unresolved bug for
# quoting arg which retains the double quotes for column attributes.
sed_inplace(outFile, r'"', '')
# Plot the statistics
plot_stats( dirPath, fileName, groupbyList[0], groupbyList[1],
filterName, filterValue, modelName[0], lpCount[0] )
def main():
dirPath = sys.argv[1]
if not os.path.exists(dirPath):
print('Invalid path to source')
sys.exit()
calc_and_plot(dirPath)
if __name__ == "__main__":
main()
| mit |
kivy-garden/garden.matplotlib | backend_kivy.py | 1 | 50958 | '''
Backend Kivy
============
.. image:: images/backend_kivy_example.jpg
:align: right
The :class:`FigureCanvasKivy` widget is used to create a matplotlib graph.
This widget has the same properties as
:class:`kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg`. FigureCanvasKivy
instead of rendering a static image, uses the kivy graphics instructions
:class:`kivy.graphics.Line` and :class:`kivy.graphics.Mesh` to render on the
canvas.
Installation
------------
The matplotlib backend for kivy can be used by using the garden extension in
kivy following this .. _link: http://kivy.org/docs/api-kivy.garden.html ::
garden install matplotlib
Or if you want to include it directly on your application ::
cd myapp
garden install --app matplotlib
Initialization
--------------
A backend can be initialized in two ways. The first one is using pure pyplot
as explained
.. _here: http://matplotlib.org/faq/usage_faq.html#what-is-a-backend::
import matplotlib
matplotlib.use('module://kivy.garden.matplotlib.backend_kivy')
Once this is done, any figure instantiated after will be wrapped by a
:class:`FigureCanvasKivy` ready to use. From here there are two options to
continue with the development.
1. Use the :class:`FigureCanvasKivy` attribute defined as canvas from Figure,
to embed your matplotlib graph in your own Kivy application as can be seen in
the first example in the following section.
.. warning::
One can create a matplotlib widget by importing FigureCanvas::
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvas
or
from kivy.garden.matplotlib.backend_kivy import FigureCanvas
and then instantiate an object::
fig, ax = plt.subplots()
my_mpl_kivy_widget = FigureCanvas(fig)
which will certainly work but a problem will arise if events were connected
before the FigureCanvas is instantiated. If this approach is taken please
connect matplotlib events after generating the matplotlib kivy widget
object ::
fig, ax = plt.subplots()
fig.canvas.mpl_connect('button_press_event', callback_handler)
my_mpl_kivy_widget = FigureCanvas(fig)
In this scenario button_press_event won't be connected with the object
being created in line 3, because it will be connected to the default canvas
set by matplotlib. If this approach is taken, be sure to connect the
events after instantiation, like the following: ::
fig, ax = plt.subplots()
my_mpl_kivy_widget = FigureCanvas(fig)
fig.canvas.mpl_connect('button_press_event', callback_handler)
2. Use pyplot to write the application following matplotlib syntax as can be
seen in the second example below. In this case a Kivy application will be
created automatically from the matplotlib instructions and a NavigationToolbar
will be added to the main canvas.
Examples
--------
1. Example of a simple Hello world matplotlib App::
fig, ax = plt.subplots()
ax.text(0.6, 0.5, "hello", size=50, rotation=30.,
ha="center", va="center",
bbox=dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
ax.text(0.5, 0.4, "world", size=50, rotation=-30.,
ha="right", va="top",
bbox=dict(boxstyle="square",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
canvas = fig.canvas
The object canvas can be added as a widget into the kivy tree widget.
If a change is done on the figure an update can be performed using
:meth:`~kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg.draw`.::
# update graph
canvas.draw()
The plot can be exported to png with
:meth:`~kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg.print_png`, which
receives the `filename` as an argument::
# export to png
canvas.print_png("my_plot.png")
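A minimal embedding sketch (illustrative only; ``PlotApp`` is a hypothetical
name, and ``canvas`` is assumed to be the ``fig.canvas`` object created above,
with the backend selected as described in the Initialization section)::
    from kivy.app import App
    from kivy.uix.boxlayout import BoxLayout
    class PlotApp(App):
        def build(self):
            root = BoxLayout()
            # the FigureCanvasKivy behaves like any other Kivy widget
            root.add_widget(canvas)
            return root
    PlotApp().run()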
2. Example of a pyplot application using matplotlib instructions::
import numpy as np
import matplotlib.pyplot as plt
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
figure, ax = plt.subplots()
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind + width, womenMeans, width, color='y', yerr=womenStd)
ax.set_ylabel('----------------------Scores------------------')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind + width)
ax.set_yticklabels(('Ahh', '--G1--', 'G2', 'G3', 'G4', 'G5', 'G5',
'G5', 'G5'), rotation=90)
ax.legend((rects1[0], rects2[0]), ('Men', 'Women'))
plt.draw()
plt.savefig("test.png")
plt.show()
Navigation Toolbar
------------------
If initialized by the first step, a :class:`NavigationToolbar2Kivy` widget can be
created as well by instantiating an object with a :class:`FigureCanvasKivy` as
parameter. The actual widget is stored in its actionbar attribute.
This can be seen in test_backend.py example ::
bl = BoxLayout(orientation="vertical")
my_mpl_kivy_widget1 = FigureCanvasKivy(fig1)
my_mpl_kivy_widget2 = FigureCanvasKivy(fig2)
nav1 = NavigationToolbar2Kivy(my_mpl_kivy_widget1)
nav2 = NavigationToolbar2Kivy(my_mpl_kivy_widget2)
bl.add_widget(nav1.actionbar)
bl.add_widget(my_mpl_kivy_widget1)
bl.add_widget(nav2.actionbar)
bl.add_widget(my_mpl_kivy_widget2)
Connecting Matplotlib events to Kivy Events
-------------------------------------------
All matplotlib events are available: `button_press_event` which is raised
on a mouse button clicked or on touch down, `button_release_event` which is
raised when a click button is released or on touch up, `key_press_event` which
is raised when a key is pressed, `key_release_event` which is raised when a key
is released, `motion_notify_event` which is raised when the mouse is on motion,
`resize_event` which is raised when the dimensions of the widget change,
`scroll_event` which is raised when the mouse scroll wheel is rolled,
`figure_enter_event` which is raised when mouse enters a new figure,
`figure_leave_event` which is raised when mouse leaves a figure,
`close_event` which is raised when the window is closed,
`draw_event` which is raised on canvas draw,
`pick_event` which is raised when an object is selected,
`idle_event` (deprecated),
`axes_enter_event` which is fired when mouse enters axes,
`axes_leave_event` which is fired when mouse leaves axes.::
def press(event):
print('press released from test', event.x, event.y, event.button)
def release(event):
print('release released from test', event.x, event.y, event.button)
def keypress(event):
print('key down', event.key)
def keyup(event):
print('key up', event.key)
def motionnotify(event):
print('mouse move to ', event.x, event.y)
def resize(event):
print('resize from mpl ', event)
def scroll(event):
print('scroll event from mpl ', event.x, event.y, event.step)
def figure_enter(event):
print('figure enter mpl')
def figure_leave(event):
print('figure leaving mpl')
def close(event):
print('closing figure')
fig.canvas.mpl_connect('button_press_event', press)
fig.canvas.mpl_connect('button_release_event', release)
fig.canvas.mpl_connect('key_press_event', keypress)
fig.canvas.mpl_connect('key_release_event', keyup)
fig.canvas.mpl_connect('motion_notify_event', motionnotify)
fig.canvas.mpl_connect('resize_event', resize)
fig.canvas.mpl_connect('scroll_event', scroll)
fig.canvas.mpl_connect('figure_enter_event', figure_enter)
fig.canvas.mpl_connect('figure_leave_event', figure_leave)
fig.canvas.mpl_connect('close_event', close)
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import matplotlib
import matplotlib.transforms as transforms
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, TimerBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.backend_bases import ShowBase, Event
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.mathtext import MathTextParser
from matplotlib import rcParams
from hashlib import md5
from matplotlib import _png
from matplotlib import _path
try:
import kivy
except ImportError:
raise ImportError("this backend requires Kivy to be installed.")
from kivy.app import App
from kivy.graphics.texture import Texture
from kivy.graphics import Rectangle
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.actionbar import ActionBar, ActionView, \
ActionButton, ActionToggleButton, \
ActionPrevious, ActionOverflow, ActionSeparator
from kivy.base import EventLoop
from kivy.core.text import Label as CoreLabel
from kivy.core.image import Image
from kivy.graphics import Color, Line
from kivy.graphics import Rotate, Translate
from kivy.graphics.instructions import InstructionGroup
from kivy.graphics.tesselator import Tesselator
from kivy.graphics.context_instructions import PopMatrix, PushMatrix
from kivy.graphics import StencilPush, StencilPop, StencilUse,\
StencilUnUse
from kivy.logger import Logger
from kivy.graphics import Mesh
from kivy.resources import resource_find
from kivy.uix.stencilview import StencilView
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.popup import Popup
from kivy.properties import ObjectProperty
from kivy.uix.textinput import TextInput
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.clock import Clock
from distutils.version import LooseVersion
_mpl_ge_1_5 = LooseVersion(matplotlib.__version__) >= LooseVersion('1.5.0')
_mpl_ge_2_0 = LooseVersion(matplotlib.__version__) >= LooseVersion('2.0.0')
import numpy as np
import io
import textwrap
import uuid
import numbers
from functools import partial
from math import cos, sin, pi
kivy.require('1.9.1')
toolbar = None
my_canvas = None
class SaveDialog(FloatLayout):
save = ObjectProperty(None)
text_input = ObjectProperty(None)
cancel = ObjectProperty(None)
class MPLKivyApp(App):
'''Creates the App initializing a FloatLayout with a figure and toolbar
widget.
'''
figure = ObjectProperty(None)
toolbar = ObjectProperty(None)
def build(self):
EventLoop.ensure_window()
layout = FloatLayout()
if self.figure:
self.figure.size_hint_y = 0.9
layout.add_widget(self.figure)
if self.toolbar:
self.toolbar.size_hint_y = 0.1
layout.add_widget(self.toolbar)
return layout
def draw_if_interactive():
    '''Redraw the active figure's canvas if matplotlib is in interactive mode.
'''
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager:
figManager.canvas.draw_idle()
class Show(ShowBase):
    '''mainloop needs to be overwritten to define the show() behavior for the
    kivy framework.
'''
def mainloop(self):
app = App.get_running_app()
if app is None:
app = MPLKivyApp(figure=my_canvas, toolbar=toolbar)
app.run()
show = Show()
def new_figure_manager(num, *args, **kwargs):
'''Create a new figure manager instance for the figure given.
'''
# if a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g., backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
'''Create a new figure manager instance for the given figure.
'''
canvas = FigureCanvasKivy(figure)
manager = FigureManagerKivy(canvas, num)
global my_canvas
global toolbar
toolbar = manager.toolbar.actionbar if manager.toolbar else None
my_canvas = canvas
return manager
class RendererKivy(RendererBase):
'''The kivy renderer handles drawing/rendering operations. A RendererKivy
should be initialized with a FigureCanvasKivy widget. On initialization
a MathTextParser is instantiated to generate math text inside a
FigureCanvasKivy widget. Additionally a list to store clip_rectangles
is defined for elements that need to be clipped inside a rectangle such
as axes. The rest of the render is performed using kivy graphics
instructions.
'''
def __init__(self, widget):
super(RendererKivy, self).__init__()
self.widget = widget
self.dpi = widget.figure.dpi
self._markers = {}
# Can be enhanced by using TextToPath matplotlib, textpath.py
self.mathtext_parser = MathTextParser("Bitmap")
self.list_goraud_triangles = []
self.clip_rectangles = []
self.labels_inside_plot = []
def contains(self, widget, x, y):
'''Returns whether or not a point is inside the widget. The value
of the point is defined in x, y as kivy coordinates.
'''
left = widget.x
bottom = widget.y
top = widget.y + widget.height
right = widget.x + widget.width
return (left <= x <= right and
bottom <= y <= top)
def handle_clip_rectangle(self, gc, x, y):
'''It checks whether the point (x,y) collides with any already
existent stencil. If so it returns the index position of the
        stencil it collides with. If the new clip rectangle bounds are
        None it draws in the canvas, otherwise it finds the corresponding
stencil or creates a new one for the new graphics instructions.
The point x,y is given in matplotlib coordinates.
'''
x = self.widget.x + x
y = self.widget.y + y
collides = self.collides_with_existent_stencil(x, y)
if collides > -1:
return collides
new_bounds = gc.get_clip_rectangle()
if new_bounds:
x = self.widget.x + int(new_bounds.bounds[0])
y = self.widget.y + int(new_bounds.bounds[1])
w = int(new_bounds.bounds[2])
h = int(new_bounds.bounds[3])
collides = self.collides_with_existent_stencil(x, y)
if collides == -1:
cliparea = StencilView(pos=(x, y), size=(w, h))
self.clip_rectangles.append(cliparea)
self.widget.add_widget(cliparea)
return len(self.clip_rectangles) - 1
else:
return collides
else:
return -2
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
'''Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
'''
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
        # check whether an optimization is needed by comparing the cost of
        # generating and using a path with the cost of emitting the path in-line.
should_do_optimization = \
len_path + uses_per_path + 5 < len_path * uses_per_path
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
# Generate an array of unique paths with the respective transformations
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
if _mpl_ge_2_0:
polygons = path.to_polygons(transform, closed_only=False)
else:
polygons = path.to_polygons(transform)
path_codes.append(polygons)
# Apply the styles and rgbFace to each one of the raw paths from
# the list. Additionally a transformation is being applied to
# translate each independent path
for xo, yo, path_poly, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
list_canvas_instruction = self.get_path_instructions(gc0, path_poly,
closed=True, rgbFace=rgbFace)
for widget, instructions in list_canvas_instruction:
widget.canvas.add(PushMatrix())
widget.canvas.add(Translate(xo, yo))
widget.canvas.add(instructions)
widget.canvas.add(PopMatrix())
def collides_with_existent_stencil(self, x, y):
'''Check all the clipareas and returns the index of the clip area that
contains this point. The point x, y is given in kivy coordinates.
'''
idx = -1
for cliparea in self.clip_rectangles:
idx += 1
if self.contains(cliparea, x, y):
return idx
return -1
def get_path_instructions(self, gc, polygons, closed=False, rgbFace=None):
'''With a graphics context and a set of polygons it returns a list
of InstructionGroups required to render the path.
'''
instructions_list = []
points_line = []
for polygon in polygons:
for x, y in polygon:
x = x + self.widget.x
y = y + self.widget.y
points_line += [float(x), float(y), ]
tess = Tesselator()
tess.add_contour(points_line)
if not tess.tesselate():
Logger.warning("Tesselator didn't work :(")
return
newclip = self.handle_clip_rectangle(gc, x, y)
if newclip > -1:
instructions_list.append((self.clip_rectangles[newclip],
self.get_graphics(gc, tess, points_line, rgbFace,
closed=closed)))
else:
instructions_list.append((self.widget,
self.get_graphics(gc, tess, points_line, rgbFace,
closed=closed)))
return instructions_list
def get_graphics(self, gc, polygons, points_line, rgbFace, closed=False):
'''Return an instruction group which contains the necessary graphics
instructions to draw the respective graphics.
'''
instruction_group = InstructionGroup()
if isinstance(gc.line['dash_list'], tuple):
gc.line['dash_list'] = list(gc.line['dash_list'])
if rgbFace is not None:
if len(polygons.meshes) != 0:
instruction_group.add(Color(*rgbFace))
for vertices, indices in polygons.meshes:
instruction_group.add(Mesh(
vertices=vertices,
indices=indices,
mode=str("triangle_fan")
))
instruction_group.add(Color(*gc.get_rgb()))
if _mpl_ge_1_5 and (not _mpl_ge_2_0) and closed:
points_poly_line = points_line[:-2]
else:
points_poly_line = points_line
if gc.line['width'] > 0:
instruction_group.add(Line(points=points_poly_line,
width=int(gc.line['width'] / 2),
dash_length=gc.line['dash_length'],
dash_offset=gc.line['dash_offset'],
dash_joint=gc.line['join_style'],
dash_list=gc.line['dash_list']))
return instruction_group
def draw_image(self, gc, x, y, im):
'''Render images that can be displayed on a matplotlib figure.
These images are generally called using imshow method from pyplot.
A Texture is applied to the FigureCanvas. The position x, y is
given in matplotlib coordinates.
'''
# Clip path to define an area to mask.
clippath, clippath_trans = gc.get_clip_path()
# Normal coordinates calculated and image added.
x = self.widget.x + x
y = self.widget.y + y
bbox = gc.get_clip_rectangle()
if bbox is not None:
l, b, w, h = bbox.bounds
else:
l = 0
b = 0
w = self.widget.width
h = self.widget.height
h, w = im.get_size_out()
rows, cols, image_str = im.as_rgba_str()
texture = Texture.create(size=(w, h))
texture.blit_buffer(image_str, colorfmt='rgba', bufferfmt='ubyte')
if clippath is None:
with self.widget.canvas:
Color(1.0, 1.0, 1.0, 1.0)
Rectangle(texture=texture, pos=(x, y), size=(w, h))
else:
if _mpl_ge_2_0:
polygons = clippath.to_polygons(clippath_trans, closed_only=False)
else:
polygons = clippath.to_polygons(clippath_trans)
list_canvas_instruction = self.get_path_instructions(gc, polygons,
rgbFace=(1.0, 1.0, 1.0, 1.0))
for widget, instructions in list_canvas_instruction:
widget.canvas.add(StencilPush())
widget.canvas.add(instructions)
widget.canvas.add(StencilUse())
widget.canvas.add(Color(1.0, 1.0, 1.0, 1.0))
widget.canvas.add(Rectangle(texture=texture,
pos=(x, y), size=(w, h)))
widget.canvas.add(StencilUnUse())
widget.canvas.add(StencilPop())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
'''Render text that is displayed in the canvas. The position x, y is
given in matplotlib coordinates. A `GraphicsContextKivy` is given
to render according to the text properties such as color, size, etc.
An angle is given to change the orientation of the text when needed.
If the text is a math expression it will be rendered using a
MathText parser.
'''
if mtext:
transform = mtext.get_transform()
ax, ay = transform.transform_point(mtext.get_position())
angle_rad = mtext.get_rotation() * np.pi / 180.
dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)])
if mtext.get_rotation_mode() == "anchor":
# if anchor mode, rotation is undone first
v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
ax = ax + v_offset * dir_vert[0]
ay = ay + v_offset * dir_vert[1]
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
ha, va = mtext.get_ha(), mtext.get_va()
if ha == "center":
ax -= w / 2
elif ha == "right":
ax -= w
if va == "top":
ay -= h
elif va == "center":
ay -= h / 2
if mtext.get_rotation_mode() != "anchor":
# if not anchor mode, rotation is undone last
v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
ax = ax + v_offset * dir_vert[0]
ay = ay + v_offset * dir_vert[1]
x, y = ax, ay
x += self.widget.x
y += self.widget.y
if ismath:
self.draw_mathtext(gc, x, y, s, prop, angle)
else:
font = resource_find(prop.get_name() + ".ttf")
color = gc.get_rgb()
if font is None:
plot_text = CoreLabel(font_size=prop.get_size_in_points(), color=color)
else:
plot_text = CoreLabel(font_size=prop.get_size_in_points(),
font_name=prop.get_name(), color=color)
plot_text.text = six.text_type("{}".format(s))
if prop.get_style() == 'italic':
plot_text.italic = True
if self.weight_as_number(prop.get_weight()) > 500:
plot_text.bold = True
plot_text.refresh()
with self.widget.canvas:
if isinstance(angle, float):
PushMatrix()
Rotate(angle=angle, origin=(int(x), int(y)))
Rectangle(pos=(int(x), int(y)), texture=plot_text.texture,
size=plot_text.texture.size)
PopMatrix()
else:
Rectangle(pos=(int(x), int(y)), texture=plot_text.texture,
size=plot_text.texture.size)
def draw_mathtext(self, gc, x, y, s, prop, angle):
'''Draw the math text using matplotlib.mathtext. The position
x,y is given in Kivy coordinates.
'''
ftimage, depth = self.mathtext_parser.parse(s, self.dpi, prop)
w = ftimage.get_width()
h = ftimage.get_height()
texture = Texture.create(size=(w, h))
if _mpl_ge_1_5:
texture.blit_buffer(ftimage.as_rgba_str()[0][0], colorfmt='rgba',
bufferfmt='ubyte')
else:
texture.blit_buffer(ftimage.as_rgba_str(), colorfmt='rgba',
bufferfmt='ubyte')
texture.flip_vertical()
with self.widget.canvas:
Rectangle(texture=texture, pos=(x, y), size=(w, h))
def draw_path(self, gc, path, transform, rgbFace=None):
'''Produce the rendering of the graphics elements using
:class:`kivy.graphics.Line` and :class:`kivy.graphics.Mesh` kivy
graphics instructions. The paths are converted into polygons and
assigned either to a clip rectangle or to the same canvas for
rendering. Paths are received in matplotlib coordinates. The
aesthetics is defined by the `GraphicsContextKivy` gc.
'''
if _mpl_ge_2_0:
polygons = path.to_polygons(transform, self.widget.width,
self.widget.height, closed_only=False)
else:
polygons = path.to_polygons(transform, self.widget.width,
self.widget.height)
list_canvas_instruction = self.get_path_instructions(gc, polygons,
closed=True, rgbFace=rgbFace)
for widget, instructions in list_canvas_instruction:
widget.canvas.add(instructions)
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
'''Markers graphics instructions are stored on a dictionary and
hashed through graphics context and rgbFace values. If a marker_path
with the corresponding graphics context exist then the instructions
are pulled from the markers dictionary.
'''
if not len(path.vertices):
return
# get a string representation of the path
path_data = self._convert_path(
marker_path,
marker_trans + Affine2D().scale(1.0, -1.0),
simplify=False)
# get a string representation of the graphics context and rgbFace.
style = str(gc._get_style_dict(rgbFace))
dictkey = (path_data, str(style))
# check whether this marker has been created before.
list_instructions = self._markers.get(dictkey)
# creating a list of instructions for the specific marker.
if list_instructions is None:
if _mpl_ge_2_0:
polygons = marker_path.to_polygons(marker_trans, closed_only=False)
else:
polygons = marker_path.to_polygons(marker_trans)
self._markers[dictkey] = self.get_path_instructions(gc,
polygons, rgbFace=rgbFace)
# Traversing all the positions where a marker should be rendered
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
for widget, instructions in self._markers[dictkey]:
widget.canvas.add(PushMatrix())
widget.canvas.add(Translate(x, y))
widget.canvas.add(instructions)
widget.canvas.add(PopMatrix())
def flipy(self):
return False
def _convert_path(self, path, transform=None, clip=None, simplify=None,
sketch=None):
if clip:
            clip = (0.0, 0.0, self.widget.width, self.widget.height)
else:
clip = None
if _mpl_ge_1_5:
return _path.convert_to_string(
path, transform, clip, simplify, sketch, 6,
[b'M', b'L', b'Q', b'C', b'z'], False).decode('ascii')
else:
return _path.convert_to_svg(path, transform, clip, simplify, 6)
def get_canvas_width_height(self):
'''Get the actual width and height of the widget.
'''
return self.widget.width, self.widget.height
def get_text_width_height_descent(self, s, prop, ismath):
'''This method is needed specifically to calculate text positioning
in the canvas. Matplotlib needs the size to calculate the points
according to their layout
'''
if ismath:
ftimage, depth = self.mathtext_parser.parse(s, self.dpi, prop)
w = ftimage.get_width()
h = ftimage.get_height()
return w, h, depth
font = resource_find(prop.get_name() + ".ttf")
if font is None:
plot_text = CoreLabel(font_size=prop.get_size_in_points())
else:
plot_text = CoreLabel(font_size=prop.get_size_in_points(),
font_name=prop.get_name())
plot_text.text = six.text_type("{}".format(s))
plot_text.refresh()
return plot_text.texture.size[0], plot_text.texture.size[1], 1
def new_gc(self):
'''Instantiate a GraphicsContextKivy object
'''
return GraphicsContextKivy(self.widget)
def points_to_pixels(self, points):
return points / 72.0 * self.dpi
def weight_as_number(self, weight):
''' Replaces the deprecated matplotlib function of the same name
'''
# Return if number
if isinstance(weight, numbers.Number):
return weight
# else use the mapping of matplotlib 2.2
elif weight == 'ultralight':
return 100
elif weight == 'light':
return 200
elif weight == 'normal':
return 400
elif weight == 'regular':
return 400
elif weight == 'book':
return 500
elif weight == 'medium':
return 500
elif weight == 'roman':
return 500
elif weight == 'semibold':
return 600
elif weight == 'demibold':
return 600
elif weight == 'demi':
return 600
elif weight == 'bold':
return 700
elif weight == 'heavy':
return 800
elif weight == 'extra bold':
return 800
elif weight == 'black':
return 900
else:
raise ValueError('weight ' + weight + ' not valid')
class NavigationToolbar2Kivy(NavigationToolbar2):
    '''This class extends the matplotlib class NavigationToolbar2 and
    creates an action bar which is added to the main app to perform the
    following operations on the figures.
Home: Resets the plot axes to the initial state.
Left: Undo an operation performed.
Right: Redo an operation performed.
Pan: Allows to drag the plot.
Zoom: Allows to define a rectangular area to zoom in.
Configure: Loads a pop up for repositioning elements.
Save: Loads a Save Dialog to generate an image.
'''
def __init__(self, canvas, **kwargs):
self.actionbar = ActionBar(pos_hint={'top': 1.0})
super(NavigationToolbar2Kivy, self).__init__(canvas)
self.rubberband_color = (1.0, 0.0, 0.0, 1.0)
self.lastrect = None
self.save_dialog = Builder.load_string(textwrap.dedent('''\
<SaveDialog>:
text_input: text_input
BoxLayout:
size: root.size
pos: root.pos
orientation: "vertical"
FileChooserListView:
id: filechooser
on_selection: text_input.text = self.selection and\
self.selection[0] or ''
TextInput:
id: text_input
size_hint_y: None
height: 30
multiline: False
BoxLayout:
size_hint_y: None
height: 30
Button:
text: "Cancel"
on_release: root.cancel()
Button:
text: "Save"
on_release: root.save(filechooser.path,\
text_input.text)
'''))
def _init_toolbar(self):
'''A Toolbar is created with an ActionBar widget in which buttons are
added with a specific behavior given by a callback. The buttons
properties are given by matplotlib.
'''
basedir = os.path.join(rcParams['datapath'], 'images')
actionview = ActionView()
actionprevious = ActionPrevious(title="Navigation", with_previous=False)
actionoverflow = ActionOverflow()
actionview.add_widget(actionprevious)
actionview.add_widget(actionoverflow)
actionview.use_separator = True
self.actionbar.add_widget(actionview)
id_group = uuid.uuid4()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
actionview.add_widget(ActionSeparator())
continue
fname = os.path.join(basedir, image_file + '.png')
if text in ['Pan', 'Zoom']:
action_button = ActionToggleButton(text=text, icon=fname,
group=id_group)
else:
action_button = ActionButton(text=text, icon=fname)
action_button.bind(on_press=getattr(self, callback))
actionview.add_widget(action_button)
def configure_subplots(self, *largs):
'''It will be implemented later.'''
pass
def dismiss_popup(self):
self._popup.dismiss()
def show_save(self):
'''Displays a popup widget to perform a save operation.'''
content = SaveDialog(save=self.save, cancel=self.dismiss_popup)
self._popup = Popup(title="Save file", content=content,
size_hint=(0.9, 0.9))
self._popup.open()
def save(self, path, filename):
self.canvas.export_to_png(os.path.join(path, filename))
self.dismiss_popup()
def save_figure(self, *args):
self.show_save()
def draw_rubberband(self, event, x0, y0, x1, y1):
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val)for val in (min(x0, x1) + self.canvas.x, min(y0, y1)
+ self.canvas.y, w, h)]
if self.lastrect is None:
self.canvas.canvas.add(Color(*self.rubberband_color))
else:
self.canvas.canvas.remove(self.lastrect)
self.lastrect = InstructionGroup()
self.lastrect.add(Line(rectangle=rect, width=1.0, dash_length=5.0,
dash_offset=5.0))
self.lastrect.add(Color(1.0, 0.0, 0.0, 0.2))
self.lastrect.add(Rectangle(pos=(rect[0], rect[1]),
size=(rect[2], rect[3])))
self.canvas.canvas.add(self.lastrect)
def release_zoom(self, event):
self.lastrect = None
return super(NavigationToolbar2Kivy, self).release_zoom(event)
class GraphicsContextKivy(GraphicsContextBase, object):
'''The graphics context provides the color, line styles, etc... All the
mapping between matplotlib and kivy styling is done here.
The GraphicsContextKivy stores colors as a RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0) such as in the Kivy framework.
    Line properties and styles are set according to the kivy framework
definition for Line.
'''
_capd = {
'butt': 'square',
'projecting': 'square',
'round': 'round',
}
line = {}
def __init__(self, renderer):
super(GraphicsContextKivy, self).__init__()
self.renderer = renderer
self.line['cap_style'] = self.get_capstyle()
self.line['join_style'] = self.get_joinstyle()
self.line['dash_offset'] = None
self.line['dash_length'] = None
self.line['dash_list'] = []
def set_capstyle(self, cs):
'''Set the cap style based on the kivy framework cap styles.
'''
GraphicsContextBase.set_capstyle(self, cs)
self.line['cap_style'] = self._capd[self._capstyle]
def set_joinstyle(self, js):
'''Set the join style based on the kivy framework join styles.
'''
GraphicsContextBase.set_joinstyle(self, js)
self.line['join_style'] = js
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
# dash_list is a list with numbers denoting the number of points
# in a dash and if it is on or off.
if dash_list is not None:
self.line['dash_list'] = dash_list
if dash_offset is not None:
self.line['dash_offset'] = int(dash_offset)
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
self.line['width'] = w
def _get_style_dict(self, rgbFace):
        '''Return the style dictionary generated from the
        GraphicsContext and rgbFace
'''
attrib = {}
forced_alpha = self.get_forced_alpha()
if rgbFace is None:
attrib['fill'] = 'none'
else:
if tuple(rgbFace[:3]) != (0, 0, 0):
attrib['fill'] = str(rgbFace)
if len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
attrib['fill-opacity'] = str(rgbFace[3])
if forced_alpha and self.get_alpha() != 1.0:
attrib['opacity'] = str(self.get_alpha())
offset, seq = self.get_dashes()
if seq is not None:
attrib['line-dasharray'] = ','.join(['%f' % val for val in seq])
attrib['line-dashoffset'] = six.text_type(float(offset))
linewidth = self.get_linewidth()
if linewidth:
rgb = self.get_rgb()
attrib['line'] = str(rgb)
if not forced_alpha and rgb[3] != 1.0:
attrib['line-opacity'] = str(rgb[3])
if linewidth != 1.0:
attrib['line-width'] = str(linewidth)
if self.get_joinstyle() != 'round':
attrib['line-linejoin'] = self.get_joinstyle()
if self.get_capstyle() != 'butt':
                attrib['line-linecap'] = self._capd[self.get_capstyle()]
return attrib
class TimerKivy(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Kivy for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = Clock.schedule_interval(self._on_timer, self._interval / 1000.0)
def _timer_stop(self):
if self._timer is not None:
Clock.unschedule(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self, dt):
super(TimerKivy, self)._on_timer()
class FigureCanvasKivy(FocusBehavior, Widget, FigureCanvasBase):
'''FigureCanvasKivy class. See module documentation for more information.
'''
def __init__(self, figure, **kwargs):
Window.bind(mouse_pos=self._on_mouse_pos)
self.bind(size=self._on_size_changed)
self.bind(pos=self._on_pos_changed)
self.entered_figure = True
self.figure = figure
super(FigureCanvasKivy, self).__init__(figure=self.figure, **kwargs)
def draw(self):
'''Draw the figure using the KivyRenderer
'''
self.clear_widgets()
self.canvas.clear()
self._renderer = RendererKivy(self)
self.figure.draw(self._renderer)
def on_touch_down(self, touch):
'''Kivy Event to trigger the following matplotlib events:
`motion_notify_event`, `scroll_event`, `button_press_event`,
`enter_notify_event` and `leave_notify_event`
'''
newcoord = self.to_widget(touch.x, touch.y, relative=True)
x = newcoord[0]
y = newcoord[1]
if super(FigureCanvasKivy, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos):
self.motion_notify_event(x, y, guiEvent=None)
touch.grab(self)
if 'button' in touch.profile and touch.button in ("scrollup", "scrolldown",):
self.scroll_event(x, y, 5, guiEvent=None)
else:
self.button_press_event(x, y, self.get_mouse_button(touch),
dblclick=False, guiEvent=None)
if self.entered_figure:
self.enter_notify_event(guiEvent=None, xy=None)
else:
if not self.entered_figure:
self.leave_notify_event(guiEvent=None)
return False
def on_touch_move(self, touch):
'''Kivy Event to trigger the following matplotlib events:
`motion_notify_event`, `enter_notify_event` and `leave_notify_event`
'''
newcoord = self.to_widget(touch.x, touch.y, relative=True)
x = newcoord[0]
y = newcoord[1]
inside = self.collide_point(touch.x, touch.y)
if inside:
self.motion_notify_event(x, y, guiEvent=None)
if not inside and not self.entered_figure:
self.leave_notify_event(guiEvent=None)
self.entered_figure = True
elif inside and self.entered_figure:
self.enter_notify_event(guiEvent=None, xy=(x, y))
self.entered_figure = False
return False
def get_mouse_button(self, touch):
'''Translate kivy convention for left, right and middle click button
into matplotlib int values: 1 for left, 2 for middle and 3 for
right.
'''
if 'button' in touch.profile:
if touch.button == "left":
return 1
elif touch.button == "middle":
return 2
elif touch.button == "right":
return 3
return -1
def on_touch_up(self, touch):
'''Kivy Event to trigger the following matplotlib events:
`scroll_event` and `button_release_event`.
'''
newcoord = self.to_widget(touch.x, touch.y, relative=True)
x = newcoord[0]
y = newcoord[1]
if touch.grab_current is self:
if 'button' in touch.profile and touch.button in ("scrollup", "scrolldown",):
self.scroll_event(x, y, 5, guiEvent=None)
else:
self.button_release_event(x, y, self.get_mouse_button(touch), guiEvent=None)
touch.ungrab(self)
else:
return super(FigureCanvasKivy, self).on_touch_up(touch)
return False
def keyboard_on_key_down(self, window, keycode, text, modifiers):
'''Kivy event to trigger matplotlib `key_press_event`.
'''
self.key_press_event(keycode[1], guiEvent=None)
return super(FigureCanvasKivy, self).keyboard_on_key_down(window,
keycode, text, modifiers)
def keyboard_on_key_up(self, window, keycode):
'''Kivy event to trigger matplotlib `key_release_event`.
'''
self.key_release_event(keycode[1], guiEvent=None)
return super(FigureCanvasKivy, self).keyboard_on_key_up(window, keycode)
def _on_mouse_pos(self, *args):
'''Kivy Event to trigger the following matplotlib events:
`motion_notify_event`, `leave_notify_event` and
`enter_notify_event`.
'''
pos = args[1]
newcoord = self.to_widget(pos[0], pos[1], relative=True)
x = newcoord[0]
y = newcoord[1]
inside = self.collide_point(*pos)
if inside:
self.motion_notify_event(x, y, guiEvent=None)
if not inside and not self.entered_figure:
self.leave_notify_event(guiEvent=None)
self.entered_figure = True
elif inside and self.entered_figure:
self.enter_notify_event(guiEvent=None, xy=(pos[0], pos[1]))
self.entered_figure = False
def enter_notify_event(self, guiEvent=None, xy=None):
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def leave_notify_event(self, guiEvent=None):
event = Event('figure_leave_event', self, guiEvent)
self.callbacks.process('figure_leave_event', event)
def _on_pos_changed(self, *args):
self.draw()
def _on_size_changed(self, *args):
'''Changes the size of the matplotlib figure based on the size of the
widget. The widget will change size according to the parent Layout
size.
'''
w, h = self.size
dpival = self.figure.dpi
winch = float(w) / dpival
hinch = float(h) / dpival
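        # For example, a 640 x 480 pixel widget at a figure dpi of 100
        # becomes a 6.4 x 4.8 inch figure.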
self.figure.set_size_inches(winch, hinch, forward=False)
self.resize_event()
self.draw()
def callback(self, *largs):
self.draw()
def blit(self, bbox=None):
'''If bbox is None, blit the entire canvas to the widget. Otherwise
blit only the area defined by the bbox.
'''
self.blitbox = bbox
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['png'] = 'Portable Network Graphics'
def print_png(self, filename, *args, **kwargs):
'''Call the widget function to make a png of the widget.
'''
fig = FigureCanvasAgg(self.figure)
FigureCanvasAgg.draw(fig)
l, b, w, h = self.figure.bbox.bounds
texture = Texture.create(size=(w, h))
texture.blit_buffer(bytes(fig.get_renderer().buffer_rgba()),
colorfmt='rgba', bufferfmt='ubyte')
texture.flip_vertical()
img = Image(texture)
img.save(filename)
def get_default_filetype(self):
return 'png'
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerKivy(*args, **kwargs)
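    # Minimal usage sketch (assumes a FigureCanvasKivy instance `canvas`
    # and a callable `update_plot`; both names are only for illustration):
    #
    #     timer = canvas.new_timer(interval=1000)
    #     timer.add_callback(update_plot)
    #     timer.start()
    #
    # add_callback() and start() are inherited from matplotlib's TimerBase.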
class FigureManagerKivy(FigureManagerBase):
'''The FigureManager main function is to instantiate the backend navigation
toolbar and to call show to instantiate the App.
'''
def __init__(self, canvas, num):
super(FigureManagerKivy, self).__init__(canvas, num)
self.canvas = canvas
self.toolbar = self._get_toolbar()
def show(self):
pass
def get_window_title(self):
return Window.title
def set_window_title(self, title):
Window.title = title
def resize(self, w, h):
if (w > 0) and (h > 0):
Window.size = w, h
def _get_toolbar(self):
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2Kivy(self.canvas)
else:
toolbar = None
return toolbar
'''Now just provide the standard names that backend.__init__ is expecting
'''
FigureCanvas = FigureCanvasKivy
FigureManager = FigureManagerKivy
NavigationToolbar = NavigationToolbar2Kivy
| mit |
buckiracer/data-science-from-scratch | dataScienceFromScratch/DataScienceFromScratch/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
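    # With the grades above, the counts are {0: 2, 60: 1, 70: 3, 80: 4,
    # 90: 2, 100: 1}: four grades in the 80s, three in the 70s, two zeros,
    # and so on.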
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
stargaser/astropy | examples/io/split-jpeg-to-fits.py | 3 | 2472 | # -*- coding: utf-8 -*-
"""
=====================================================
Convert a 3-color image (JPG) to separate FITS images
=====================================================
This example opens an RGB JPEG image and writes out each channel as a separate
FITS (image) file.
This example uses `pillow <http://python-pillow.org>`_ to read the image,
`matplotlib.pyplot` to display the image, and `astropy.io.fits` to save FITS files.
*By: Erik Bray, Adrian Price-Whelan*
*License: BSD*
"""
import numpy as np
from PIL import Image
from astropy.io import fits
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Load and display the original 3-color jpeg image:
image = Image.open('Hs-2009-14-a-web.jpg')
xsize, ysize = image.size
print("Image size: {} x {}".format(xsize, ysize))
plt.imshow(image)
##############################################################################
# Split the three channels (RGB) and get the data as Numpy arrays. The arrays
# are flattened, so they are 1-dimensional:
r, g, b = image.split()
r_data = np.array(r.getdata()) # data is now an array of length ysize*xsize
g_data = np.array(g.getdata())
b_data = np.array(b.getdata())
print(r_data.shape)
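# For example, a 1280 x 1024 image would print (1310720,) here: one flat
# 1-D array per channel (illustrative size, not the actual image).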
##############################################################################
# Reshape the image arrays to be 2-dimensional:
r_data = r_data.reshape(ysize, xsize)
g_data = g_data.reshape(ysize, xsize)
b_data = b_data.reshape(ysize, xsize)
##############################################################################
# Write out the channels as separate FITS images
red = fits.PrimaryHDU(data=r_data)
red.header['LATOBS'] = "32:11:56" # add spurious header info
red.header['LONGOBS'] = "110:56"
red.writeto('red.fits')
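# As a quick (illustrative) check, the spurious keywords can be read back,
# e.g. fits.open('red.fits')[0].header['LATOBS'] returns '32:11:56'.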
green = fits.PrimaryHDU(data=g_data)
green.header['LATOBS'] = "32:11:56"
green.header['LONGOBS'] = "110:56"
green.writeto('green.fits')
blue = fits.PrimaryHDU(data=b_data)
blue.header['LATOBS'] = "32:11:56"
blue.header['LONGOBS'] = "110:56"
blue.writeto('blue.fits')
##############################################################################
# Delete the files created
import os
os.remove('red.fits')
os.remove('green.fits')
os.remove('blue.fits')
| bsd-3-clause |
Averroes/statsmodels | statsmodels/tsa/statespace/tests/test_tools.py | 19 | 4268 | """
Tests for tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
from statsmodels.tsa.statespace import tools
# from .results import results_sarimax
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal, assert_raises
)
class TestCompanionMatrix(object):
cases = [
(2, np.array([[0,1],[0,0]])),
([1,-1,-2], np.array([[1,1],[2,0]])),
([1,-1,-2,-3], np.array([[1,1,0],[2,0,1],[3,0,0]]))
]
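    # Reading of the second case: a lag polynomial 1 - a1*L - a2*L**2 is
    # entered as [1, -1, -2] (a1 = 1, a2 = 2) and its companion matrix is
    # [[a1, 1], [a2, 0]] = [[1, 1], [2, 0]].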
def test_cases(self):
for polynomial, result in self.cases:
assert_equal(tools.companion_matrix(polynomial), result)
class TestDiff(object):
x = np.arange(10)
cases = [
# diff = 1
([1,2,3], 1, None, 1, [1, 1]),
# diff = 2
(x, 2, None, 1, [0]*8),
# diff = 1, seasonal_diff=1, k_seasons=4
(x, 1, 1, 4, [0]*5),
(x**2, 1, 1, 4, [8]*5),
(x**3, 1, 1, 4, [60, 84, 108, 132, 156]),
# diff = 1, seasonal_diff=2, k_seasons=2
(x, 1, 2, 2, [0]*5),
(x**2, 1, 2, 2, [0]*5),
(x**3, 1, 2, 2, [24]*5),
(x**4, 1, 2, 2, [240, 336, 432, 528, 624]),
]
def test_cases(self):
# Basic cases
for series, diff, seasonal_diff, k_seasons, result in self.cases:
# Test numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Series
series = pd.Series(series)
# Rewrite to test as n-dimensional array
series = np.c_[series, series]
result = np.c_[result, result]
# Test Numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Dataframe
series = pd.DataFrame(series)
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
class TestIsInvertible(object):
cases = [
([1, -0.5], True),
([1, 1-1e-9], True),
([1, 1], False),
([1, 0.9,0.1], True),
(np.array([1,0.9,0.1]), True),
(pd.Series([1,0.9,0.1]), True)
]
def test_cases(self):
for polynomial, invertible in self.cases:
assert_equal(tools.is_invertible(polynomial), invertible)
class TestConstrainStationaryUnivariate(object):
cases = [
(np.array([2.]), -2./((1+2.**2)**0.5))
]
def test_cases(self):
for unconstrained, constrained in self.cases:
result = tools.constrain_stationary_univariate(unconstrained)
assert_equal(result, constrained)
class TestValidateMatrixShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,2), 5, 2, None),
('TEST', (5,2), 5, 2, 10),
('TEST', (5,2,10), 5, 2, 10),
]
invalid = [
('TEST', (5,), 5, None, None),
('TEST', (5,1,1,1), 5, 1, None),
('TEST', (5,2), 10, 2, None),
('TEST', (5,2), 5, 1, None),
('TEST', (5,2,10), 5, 2, None),
('TEST', (5,2,10), 5, 2, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_matrix_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_matrix_shape, *args
)
class TestValidateVectorShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,), 5, None),
('TEST', (5,), 5, 10),
('TEST', (5,10), 5, 10),
]
invalid = [
('TEST', (5,2,10), 5, 10),
('TEST', (5,), 10, None),
('TEST', (5,10), 5, None),
('TEST', (5,10), 5, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_vector_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_vector_shape, *args
)
| bsd-3-clause |
peckhams/topoflow | topoflow/components/met_base.py | 1 | 111479 |
## Does "land_surface_air__latent_heat_flux" make sense? (2/5/13)
# Copyright (c) 2001-2014, Scott D. Peckham
#
# Sep 2014. Fixed sign error in update_bulk_richardson_number().
# Ability to compute separate P_snow and P_rain.
# Aug 2014. New CSDMS Standard Names and clean up.
# Nov 2013. Converted TopoFlow to a Python package.
#
# Jan 2013. Revised handling of input/output names.
# Oct 2012. CSDMS Standard Names (version 0.7.9) and BMI.
# May 2012. P is now a 1D array with one element and mutable,
# so any comp with ref to it can see it change.
# Jun 2010. update_net_shortwave_radiation(), etc.
# May 2010. Changes to initialize() and read_cfg_file().
# Aug 2009
# Jan 2009. Converted from IDL.
#
#-----------------------------------------------------------------------
# NOTES: This file defines a "base class" for meteorology
# components as well as any functions used by most or
# all meteorology methods. The methods of this class
# should be over-ridden as necessary for different
# methods of modeling meteorology.
#-----------------------------------------------------------------------
# Notes: Do we ever need to distinguish between a surface
# temperature and snow temperature (in the snow) ?
# Recall that a separate T_soil_x variable is used
# to compute Qc.
#
# Cp_snow is from NCAR CSM Flux Coupler web page
#
# rho_H2O is currently not adjustable with GUI. (still true?)
#
#-----------------------------------------------------------------------
#
# class met_component (inherits from BMI_base.py)
#
# get_component_name()
# get_attribute() # (10/26/11)
# get_input_var_names() # (5/15/12)
# get_output_var_names() # (5/15/12)
# get_var_name() # (5/15/12)
# get_var_units() # (5/15/12)
# ---------------------
# set_constants()
# initialize()
# update()
# finalize()
# ----------------------------
# set_computed_input_vars()
# initialize_computed_vars()
# ----------------------------
# update_P_integral()
# update_P_max()
# update_P_rain() # (9/14/14, new method)
# update_P_snow() # (9/14/14, new method)
# ------------------------------------
# update_bulk_richardson_number()
# update_bulk_aero_conductance()
# update_sensible_heat_flux()
# update_saturation_vapor_pressure()
# update_vapor_pressure()
# update_dew_point() # (7/6/10)
# update_precipitable_water_content() # (7/6/10)
# ------------------------------------
# update_latent_heat_flux()
# update_conduction_heat_flux()
# update_advection_heat_flux()
# ------------------------------------
# update_julian_day() # (7/1/10)
# update_net_shortwave_radiation() # (7/1/10)
# update_em_air() # (7/1/10)
# update_net_longwave_radiation() # (7/1/10)
# update_net_energy_flux() # ("Q_sum")
# ------------------------------------
# open_input_files()
# read_input_files()
# close_input_files()
# ------------------------------------
# update_outfile_names()
# open_output_files()
# write_output_files()
# close_output_files()
# save_grids()
# save_pixel_values()
#
# Functions:
# compare_em_air_methods()
#
#-----------------------------------------------------------------------
import numpy as np
import os
from topoflow.components import solar_funcs as solar
from topoflow.utils import BMI_base
from topoflow.utils import model_input
from topoflow.utils import model_output
from topoflow.utils import rtg_files
#-----------------------------------------------------------------------
class met_component( BMI_base.BMI_component ):
#-------------------------------------------------------------------
_att_map = {
'model_name': 'TopoFlow_Meteorology',
'version': '3.1',
'author_name': 'Scott D. Peckham',
'grid_type': 'uniform',
'time_step_type': 'fixed',
'step_method': 'explicit',
#-------------------------------------------------------------
'comp_name': 'Meteorology',
'model_family': 'TopoFlow',
'cfg_template_file': 'Meteorology.cfg.in',
'cfg_extension': '_meteorology.cfg',
'cmt_var_prefix': '/Meteorology/Input/Var/',
'gui_xml_file': '/home/csdms/cca/topoflow/3.1/src/share/cmt/gui/Meteorology.xml',
'dialog_title': 'Meteorology: Method 1 Parameters',
'time_units': 'seconds' }
#---------------------------------------------------------
# Note that SWE = "snow water equivalent", but it really
# just means "liquid_equivalent".
#---------------------------------------------------------
_input_var_names = [
'snowpack__z_mean_of_mass-per-volume_density', # rho_snow
'snowpack__depth', # h_snow
'snowpack__liquid-equivalent_depth', # h_swe
'snowpack__melt_volume_flux' ] # SM (MR used for ice?)
#-----------------------------------------------------------
# albedo, emissivity and transmittance are dimensionless.
#-----------------------------------------------------------
# "atmosphere_aerosol_dust__reduction_of_transmittance" vs.
# This TF parameter comes from Dingman, App. E, p. 604.
#-----------------------------------------------------------
# There is an Optical_Air_Mass function in solar_funcs.py.
# However, this quantity is not saved in comp state.
#
# "optical_path_length_ratio" vs. "optical_air_mass" OR
# "airmass_factor" OR "relative_airmass" OR
# "relative_optical_path_length"
#-----------------------------------------------------------
# Our term "liquid_equivalent_precipitation" is widely
# used on the Internet, with 374,000 Google hits.
#--------------------------------------------------------------
# Note: "bulk exchange coefficient" has 2460 Google hits.
# It is closely related to a "transfer coefficient"
# for mass, momentum or heat. There are no CF
# Standard Names with "bulk", "exchange" or "transfer".
#
# Zhang et al. (2000) use "bulk exchange coefficient" in a
# nonstandard way, with units of velocity vs. unitless.
#
# Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat [m/s]
# De = bulk exchange coeff for vapor [m/s]
#---------------------------------------------------------------
# Now this component uses T_air to break the liquid-equivalent
# precip rate into separate P_rain and P_snow components.
# P_rain is used by channel_base.update_R()
# P_snow is used by snow_base.update_depth()
#---------------------------------------------------------------
_output_var_names = [
# 'atmosphere__optical_path_length_ratio', # M_opt [1] (in solar_funcs.py)
# 'atmosphere__von_karman_constant', # kappa
'atmosphere_aerosol_dust__reduction_of_transmittance', # dust_atten ##### (from GUI)
'atmosphere_air-column_water-vapor__liquid-equivalent_depth', # W_p ("precipitable depth")
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor', # canopy_factor
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor', # cloud_factor
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance', # De [m s-1], latent
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance', # Dh [m s-1], sensible
'atmosphere_bottom_air__emissivity', # em_air
'atmosphere_bottom_air__mass-per-volume_density', # rho_air
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity', # Cp_air
        'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance', # Dn [m s-1], neutral
'atmosphere_bottom_air__pressure', # p0
'atmosphere_bottom_air__temperature', # T_air
'atmosphere_bottom_air_flow__bulk_richardson_number', # Ri [1]
'atmosphere_bottom_air_flow__log_law_roughness_length', # z0_air
'atmosphere_bottom_air_flow__reference-height_speed', # uz
'atmosphere_bottom_air_flow__speed_reference_height', # z
'atmosphere_bottom_air_land_net-latent-heat__energy_flux', # Qe [W m-2]
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux', # Qh [W m-2]
'atmosphere_bottom_air_water-vapor__dew_point_temperature', # T_dew
'atmosphere_bottom_air_water-vapor__partial_pressure', # e_air # (insert "reference_height" ??)
'atmosphere_bottom_air_water-vapor__relative_saturation', # RH
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure', # e_sat_air
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux', # vol_P
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux', # P_max
'atmosphere_water__precipitation_leq-volume_flux', # P [m s-1]
'atmosphere_water__rainfall_volume_flux', # P_rain [m s-1] (liquid)
'atmosphere_water__snowfall_leq-volume_flux', # P_snow [m s-1]
'earth__standard_gravity_constant', # g [m s-2]
'land_surface__albedo', # albedo
'land_surface__aspect_angle', # alpha (from GUI)
'land_surface__emissivity', # em_surf
'land_surface__latitude', # lat_deg [degrees]
'land_surface__longitude', # lon_deg [degrees]
'land_surface__slope_angle', # beta (from GUI)
'land_surface__temperature', # T_surf ### OR JUST "land__temperature"?
# 'land_surface_air__temperature', # T_air
'land_surface_air_water-vapor__partial_pressure', # e_surf # (insert "reference_height" ??)
'land_surface_air_water-vapor__saturated_partial_pressure', # e_sat_surf
'land_surface_net-longwave-radiation__energy_flux', # Qn_LW [W m-2]
'land_surface_net-shortwave-radiation__energy_flux', # Qn_SW [W m-2]
'land_surface_net-total-energy__energy_flux', # Q_sum [W w-2]
'model__time_step', # dt
'physics__stefan_boltzmann_constant', # sigma [W m-2 K-4]
'physics__von_karman_constant', # kappa [1]
'water__mass-specific_latent_fusion_heat', # Lf [J kg-1]
'water__mass-specific_latent_vaporization_heat', # Lv [J kg-1]
'water-liquid__mass-per-volume_density' ] # rho_H2O
#-----------------------------------------
# These are used only in solar_funcs.py
# Later, create a Radiation component.
#---------------------------------------------
# Should we allow "day" as a base quantity ?
# "day_length" is confusing. Think about "date" also.
# Maybe something like:
#
# "earth__mean_solar_rotation_period"
# "earth__sidereal_rotation_period"
# "earth__stellar_rotation_period" (relative to "fixed stars")
# maybe: "earth__complete_rotation_period" ??
#
# OR:
# "earth_mean_solar_day__duration"
# "earth_sidereal_day__duration"
# "earth_stellar_day__duration"
#
# OR perhaps:
# "earth_mean_solar_day__rotation_period"
# "earth_sidereal_day__rotation_period"
# "earth_stellar_day__rotation_period"
#
# "stellar rotation period" gives 84,500 Google hits.
# "solar_rotation_period" gives 41,100 Google hits.
# "sidereal_roation_period" gives 86,000 Google hits.
# "stellar day" gives 136,000 Google hits (but many unrelated).
#
# NB! "stellar_rotation_period" is ambiguous since it is also
# used for the rotation period of a star.
#
# "earth_mean_solar_day__hour_count" ("standard_day" ?)
# "earth_sidereal_day__hour_count"
# "earth_sidereal_day__duration"
# "earth__rotation_period" = "sidereal_day"
#
# "earth_stellar_day__period" ??
# "earth_stellar_day__duration" ??
#
#------------------------------------------------------------------
# For "earth__rotation_rate", it seems this should be based on
# the sidereal day (23.93 hours) instead of the mean solar day.
#------------------------------------------------------------------
# There are at least a few online sources that use both terms:
# "equivalent latitude" and "equivalent longitude". See:
# "The Construction and Application of a Martian Snowpack Model".
#------------------------------------------------------------------
# Adopt the little-used term: "topographic_sunrise" ?
# Or maybe "illuminated_topography", or "local_sunrise" ??
#------------------------------------------------------------------
# For angle relations between the earth and the sun, should we
# just use the adjective "solar" in the quantity name or include
# sun in the object name? We could also use terms like:
# earth_to_sun__declination_angle
# earth_to_sun__right_ascension_angle
#
#------------------------------------------------------------------
# The adjective "local" in "earth_local_apparent_noon__time"
# may be helpful in other contexts such as:
# 'earth__local_longitude' and 'land_surface__local_elevation'.
#------------------------------------------------------------------
# 'earth__autumnal_equinox_date',
# 'earth__autumnal_equinox_time',
# 'earth_axis__ecliptic_tilt_angle', # tilt_angle
# 'earth__julian_day_number', ########
# 'earth__julian_day_angle',
# 'earth__local_apparent_noon_time'
# 'earth__mean_radius',
# 'earth__mean_solar_day_duration', # (exactly 24 hours)
# 'earth_orbit__eccentricity',
# 'earth_orbit__period', # (one year)
# 'earth__perihelion_julian_day', ######
# 'earth__rotation_period', ######
# 'earth__rotation_rate', # Omega ###### What about Angular Velocity ?
# 'earth__sidereal_day_duration', # (one rotation = 23.934470 hours)
# 'earth__solar_declination_angle',
# 'earth__solar_hour_angle',
# 'earth__solar_irradiation_constant', ## (or "insolation_constant" ??)
# 'earth__solar_right_ascension_angle',
# 'earth__solar_vertical_angle', (complement of zenith angle)
# 'earth__solar_zenith_angle',
# 'earth__stellar_day_duration', # (relative to the "fixed stars")
# 'earth__summer_solstice_date',
# 'earth__summer_solstice_time',
# 'earth__topographic_sunrise_equivalent_latitude',
# 'earth__topographic_sunrise_equivalent_longitude', (flat_lon + offset)
# 'earth__topographic_sunrise_equivalent_longitude_offset',
# 'earth__topographic_sunrise_time',
# 'earth__topographic_sunset_time',
# 'earth_true_solar_noon___time', #####
# 'earth_clock__true_solar_noon_time'
# 'earth__vernal_equinox_date',
# 'earth__vernal_equinox_time',
# 'earth__winter_solstice_date',
# 'earth__winter_solstice_time',
#
# What about a "slope_corrected" or "topographic" version of K_dir ?
#
# 'land_surface__backscattered_shortwave_irradiation_flux', # K_bs
# 'land_surface__diffuse_shortwave_irradiation_flux', # K_dif
# 'land_surface__direct_shortwave_irradiation_flux', # K_dir
# 'land_surface__global_shortwave_irradiation_flux', # K_glob = K_dif + K_dir
#------------------------------------------------------------------
#------------------------------------------------------------------
# Maybe we should rename "z" to "z_ref" and "uz" to "uz_ref" ?
#------------------------------------------------------------------
_var_name_map = {
'snowpack__z_mean_of_mass-per-volume_density': 'rho_snow',
'snowpack__depth': 'h_snow',
'snowpack__liquid-equivalent_depth': 'h_swe',
'snowpack__melt_volume_flux': 'SM', # (MR is used for ice)
#-----------------------------------------------------------------
#'atmosphere__optical_path_length_ratio': 'M_opt', # (in solar_funcs.py)
# 'atmosphere__von_karman_constant': 'kappa',
'atmosphere_aerosol_dust__reduction_of_transmittance': 'dust_atten',
'atmosphere_air-column_water-vapor__liquid-equivalent_depth': 'W_p', #########
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor': 'canopy_factor',
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor': 'cloud_factor',
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance': 'De',
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance': 'Dh',
'atmosphere_bottom_air__emissivity': 'em_air',
'atmosphere_bottom_air__mass-per-volume_density': 'rho_air',
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity': 'Cp_air',
'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance': 'Dn',
'atmosphere_bottom_air__pressure': 'p0',
'atmosphere_bottom_air__temperature': 'T_air',
'atmosphere_bottom_air_flow__bulk_richardson_number': 'Ri',
'atmosphere_bottom_air_flow__log_law_roughness_length': 'z0_air', ## (not "z0")
'atmosphere_bottom_air_flow__reference-height_speed': 'uz',
'atmosphere_bottom_air_flow__speed_reference_height': 'z',
'atmosphere_bottom_air_land_net-latent-heat__energy_flux': 'Qe',
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux': 'Qh',
'atmosphere_bottom_air_water-vapor__dew_point_temperature': 'T_dew',
'atmosphere_bottom_air_water-vapor__partial_pressure': 'e_air',
'atmosphere_bottom_air_water-vapor__relative_saturation': 'RH',
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure': 'e_sat_air',
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux': 'vol_P',
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux': 'P_max',
'atmosphere_water__precipitation_leq-volume_flux': 'P',
'atmosphere_water__rainfall_volume_flux': 'P_rain',
'atmosphere_water__snowfall_leq-volume_flux': 'P_snow',
'earth__standard_gravity_constant': 'g',
'land_surface__albedo': 'albedo',
'land_surface__aspect_angle': 'alpha',
'land_surface__emissivity': 'em_surf',
'land_surface__latitude': 'lat_deg',
'land_surface__longitude': 'lon_deg',
'land_surface__slope_angle': 'beta',
'land_surface__temperature': 'T_surf',
# 'land_surface_air__temperature': 'T_surf',
'land_surface_air_water-vapor__partial_pressure': 'e_surf',
'land_surface_air_water-vapor__saturated_partial_pressure': 'e_sat_surf',
'land_surface_net-longwave-radiation__energy_flux': 'Qn_LW',
'land_surface_net-shortwave-radiation__energy_flux': 'Qn_SW',
'land_surface_net-total-energy__energy_flux': 'Q_sum',
'model__time_step': 'dt',
'physics__stefan_boltzmann_constant': 'sigma',
'physics__von_karman_constant': 'kappa',
'water__mass-specific_latent_fusion_heat': 'Lf',
'water__mass-specific_latent_vaporization_heat': 'Lv',
'water-liquid__mass-per-volume_density': 'rho_H2O' }
#-----------------------------------------------------------------
# Note: The "update()" function calls several functions with the
# MBAR keyword set to get units of "mbar" vs. "kPa".
#-----------------------------------------------------------------
# Note: We need to be careful with whether units are C or K,
# for all "thermal" quantities (e.g. thermal_capacity).
#-----------------------------------------------------------------
# Note: ARHYTHM had 3 "bulk exchange coefficients" that are all
# equal and therefore have the same units of [m s-1].
# Double-check that this is what is intended. ##########
#-----------------------------------------------------------------
# Note: "atmosphere_column_water__liquid_equivalent_depth" has
# units of "cm", as in Dingman's book. Make sure it gets
# used correctly in equations.
#-----------------------------------------------------------------
# Note: slope_angle and aspect_angle have units of RADIANS.
# aspect_angle is measured CW from north.
# RT files ending in "_mf-angle.rtg" and "fd-aspect.rtg"
# contain aspect values. The former are in [0, 2 Pi]
# while the latter are in [-Pi, Pi] and both measure
# CCW from due east. They are converted for use here.
#-----------------------------------------------------------------
_var_units_map = {
'snowpack__z_mean_of_mass-per-volume_density': 'kg m-3',
'snowpack__depth': 'm',
'snowpack__liquid-equivalent_depth': 'm',
'snowpack__melt_volume_flux': 'm s-1',
#-------------------------------------------------------------
# 'atmosphere__optical_path_length_ratio': '1',
# 'atmosphere__von_karman_constant': '1',
'atmosphere_aerosol_dust__reduction_of_transmittance': '1',
'atmosphere_air-column_water-vapor__liquid-equivalent_depth': 'cm', # (see Notes above)
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor': '1',
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor': '1',
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__emissivity': '1',
'atmosphere_bottom_air__mass-per-volume_density': 'kg m-3',
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity': 'J kg-1 K-1', # (see Notes above)
'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__pressure': 'mbar',
'atmosphere_bottom_air__temperature': 'deg_C', # (see Notes above)
'atmosphere_bottom_air_flow__bulk_richardson_number': '1',
'atmosphere_bottom_air_flow__log_law_roughness_length': 'm',
'atmosphere_bottom_air_flow__reference-height_speed': 'm s-1',
'atmosphere_bottom_air_flow__speed_reference_height': 'm',
'atmosphere_bottom_air_land_net-latent-heat__energy_flux': 'W m-2',
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux': 'W m-2',
'atmosphere_bottom_air_water-vapor__dew_point_temperature': 'deg_C',
'atmosphere_bottom_air_water-vapor__partial_pressure': 'mbar', # (see Notes above)
'atmosphere_bottom_air_water-vapor__relative_saturation': '1',
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure': 'mbar', # (see Notes above)
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux': 'm3',
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux': 'm s-1',
'atmosphere_water__precipitation_leq-volume_flux': 'm s-1',
'atmosphere_water__rainfall_volume_flux': 'm s-1', # (see Notes above)
'atmosphere_water__snowfall_leq-volume_flux': 'm s-1', # (see Notes above)
'earth__standard_gravity_constant': 'm s-2',
'land_surface__albedo': '1',
'land_surface__aspect_angle': 'radians', # (see Notes above)
'land_surface__emissivity': '1',
'land_surface__latitude': 'degrees',
'land_surface__longitude': 'degrees',
'land_surface__slope_angle': 'radians',
'land_surface__temperature': 'deg_C',
# 'land_surface_air__temperature': 'deg_C',
'land_surface_air_water-vapor__partial_pressure': 'mbar',
'land_surface_air_water-vapor__saturated_partial_pressure': 'mbar',
'land_surface_net-longwave-radiation__energy_flux': 'W m-2',
'land_surface_net-shortwave-radiation__energy_flux': 'W m-2',
'land_surface_net-total-energy__energy_flux': 'W m-2',
'model__time_step': 's',
'physics__stefan_boltzmann_constant': 'W m-2 K-4',
'physics__von_karman_constant': '1',
'water__mass-specific_latent_fusion_heat': 'J kg-1',
'water__mass-specific_latent_vaporization_heat': 'J kg-1',
'water-liquid__mass-per-volume_density': 'kg m-3' }
#------------------------------------------------
# Return NumPy string arrays vs. Python lists ?
#------------------------------------------------
## _input_var_names = np.array( _input_var_names )
## _output_var_names = np.array( _output_var_names )
#-------------------------------------------------------------------
def get_component_name(self):
return 'TopoFlow_Meteorology'
# get_component_name()
#-------------------------------------------------------------------
def get_attribute(self, att_name):
try:
return self._att_map[ att_name.lower() ]
except:
print '###################################################'
print ' ERROR: Could not find attribute: ' + att_name
print '###################################################'
print ' '
# get_attribute()
#-------------------------------------------------------------------
def get_input_var_names(self):
#--------------------------------------------------------
# Note: These are currently variables needed from other
# components vs. those read from files or GUI.
#--------------------------------------------------------
return self._input_var_names
# get_input_var_names()
#-------------------------------------------------------------------
def get_output_var_names(self):
return self._output_var_names
# get_output_var_names()
#-------------------------------------------------------------------
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
# get_var_name()
#-------------------------------------------------------------------
def get_var_units(self, long_var_name):
return self._var_units_map[ long_var_name ]
# get_var_units()
#-------------------------------------------------------------------
## def get_var_type(self, long_var_name):
##
## #---------------------------------------
## # So far, all vars have type "double",
## # but use the one in BMI_base instead.
## #---------------------------------------
## return 'float64'
##
## # get_var_type()
#-------------------------------------------------------------------
def set_constants(self):
#---------------------------------
# Define some physical constants
#---------------------------------
self.g = np.float64(9.81) # [m s-2, gravity]
self.kappa = np.float64(0.408) # [1] (von Karman)
self.rho_H2O = np.float64(1000) # [kg m-3]
self.rho_air = np.float64(1.2614) # [kg m-3]
self.Cp_air = np.float64(1005.7) # [J kg-1 K-1]
self.Lv = np.float64(2500000) # [J kg-1] Latent heat of vaporiz.
self.Lf = np.float64(334000) # [J kg-1 = W s kg-1], Latent heat of fusion
self.sigma = np.float64(5.67E-8) # [W m-2 K-4] (Stefan-Boltzman constant)
self.C_to_K = np.float64(273.15) # (add to convert deg C to K)
self.twopi = np.float64(2) * np.pi
self.one_seventh = np.float64(1) / 7
self.hours_per_day = np.float64(24)
self.secs_per_day = np.float64(3600) * self.hours_per_day
#---------------------------
# See update_latent_heat()
#-----------------------------------------------------------
# According to Dingman (2002, p. 273), constant should
# be 0.622 instead of 0.662 (Zhang et al., 2000, p. 1002).
# Is this constant actually the dimensionless ratio of
# the molecular weight of water to that of dry air ?
#-----------------------------------------------------------
## self.latent_heat_constant = np.float64(0.622)
self.latent_heat_constant = np.float64(0.662)
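        # For reference: the ratio of the molecular weight of water
        # (about 18.016 g/mol) to that of dry air (about 28.966 g/mol)
        # is 0.622, which supports Dingman's value above.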
#----------------------------------------
# Constants related to precip (9/24/09)
#----------------------------------------
self.mmph_to_mps = (np.float64(1) / np.float64(3600000))
self.mps_to_mmph = np.float64(3600000)
self.forever = np.float64(999999999) # [minutes]
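        # Worked example: a 10 mm/h rain rate times mmph_to_mps gives
        # 10 / 3.6e6 = 2.78e-6 m/s; mps_to_mmph converts it back.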
#------------------------------------------------
# Only needed for method 1, where all rates and
# durations are read as 1D arrays from GUI.
# Method 1 may be removed in a future version.
#------------------------------------------------
## self.method1_rates = None
## self.method1_durations = None
## self.method1_n_rates = 0
# set_constants()
#-------------------------------------------------------------------
def initialize(self, cfg_file=None, mode="nondriver",
SILENT=False):
if not(SILENT):
print ' '
print 'Meteorology component: Initializing...'
self.status = 'initializing' # (OpenMI 2.0 convention)
self.mode = mode
self.cfg_file = cfg_file
#-----------------------------------------------
# Load component parameters from a config file
#-----------------------------------------------
self.set_constants()
self.initialize_config_vars()
## print ' Calling read_grid_info()...'
self.read_grid_info()
## print ' Calling initialize_basin_vars()...'
self.initialize_basin_vars() # (5/14/10)
#----------------------------------------------------
# NB! This read_input_files() uses self.time_index.
# Also needs to be before "Disabled" test.
#----------------------------------------------------
## print ' Calling initialize_time_vars()...'
self.initialize_time_vars()
#-------------------------------------------------
# (5/19/12) This makes P "mutable", which allows
# its updated values to be seen by any component
# that has a reference to it.
# Note that P will typically be read from a file.
#-------------------------------------------------
self.P = self.initialize_var( self.P_type )
self.P_rain = self.initialize_var( self.P_type )
self.P_snow = self.initialize_var( self.P_type )
#------------------------------------------------------------
# If "Enabled", will call initialize_computed_vars() below.
#------------------------------------------------------------
if (self.comp_status == 'Disabled'):
if not(SILENT):
print 'Meteorology component: Disabled in CFG file.'
self.e_air = self.initialize_scalar(0, dtype='float64')
self.e_surf = self.initialize_scalar(0, dtype='float64')
self.em_air = self.initialize_scalar(0, dtype='float64')
self.Qn_SW = self.initialize_scalar(0, dtype='float64')
self.Qn_LW = self.initialize_scalar(0, dtype='float64')
self.Q_sum = self.initialize_scalar(0, dtype='float64')
self.Qc = self.initialize_scalar(0, dtype='float64')
self.Qa = self.initialize_scalar(0, dtype='float64')
self.DONE = True
self.status = 'initialized'
return
#-----------------------------------------------
# Read from files as needed to initialize vars
#-----------------------------------------------
self.open_input_files()
self.read_input_files() # (initializes P)
## self.check_input_types() # (not needed so far)
#-----------------------
# Initialize variables
#-----------------------
## print ' Calling initialize_computed_vars()...'
self.initialize_computed_vars() # (after read_input_files)
if not(self.PRECIP_ONLY):
self.open_output_files()
self.status = 'initialized' # (OpenMI 2.0 convention)
# initialize()
#-------------------------------------------------------------------
## def update(self, dt=-1.0, time_seconds=None):
def update(self, dt=-1.0):
#----------------------------------------------------------
# Note: The read_input_files() method is first called by
# the initialize() method. Then, the update()
# method is called one or more times, and it calls
# other update_*() methods to compute additional
# variables using input data that was last read.
# Based on this pattern, read_input_files() should
# be called at end of update() method as done here.
# If the input files don't contain any additional
# data, the last data read persists by default.
#----------------------------------------------------------
if (self.comp_status == 'Disabled'): return
self.status = 'updating' # (OpenMI 2.0 convention)
#-------------------------------------------
# Update computed values related to precip
#-------------------------------------------
self.update_P_integral()
self.update_P_max()
self.update_P_rain()
self.update_P_snow()
#-------------------------
# Update computed values
#-------------------------
if not(self.PRECIP_ONLY):
self.update_bulk_richardson_number()
self.update_bulk_aero_conductance()
self.update_sensible_heat_flux()
self.update_saturation_vapor_pressure(MBAR=True)
self.update_saturation_vapor_pressure(MBAR=True, SURFACE=True) ########
self.update_vapor_pressure(MBAR=True)
self.update_dew_point() ###
self.update_precipitable_water_content() ###
self.update_vapor_pressure(MBAR=True, SURFACE=True) ########
self.update_latent_heat_flux() # (uses e_air and e_surf)
self.update_conduction_heat_flux()
self.update_advection_heat_flux()
self.update_julian_day()
self.update_net_shortwave_radiation()
self.update_em_air()
self.update_net_longwave_radiation()
self.update_net_energy_flux() # (at the end)
#----------------------------------------
# Read next met vars from input files ?
#-------------------------------------------
# Note that read_input_files() is called
# by initialize() and these values must be
# used for "update" calls before reading
# new ones.
#-------------------------------------------
if (self.time_index > 0):
self.read_input_files()
#----------------------------------------------
# Write user-specified data to output files ?
#----------------------------------------------
# Components use own self.time_sec by default.
#-----------------------------------------------
if not(self.PRECIP_ONLY):
self.write_output_files()
## self.write_output_files( time_seconds )
#-----------------------------
# Update internal clock
# after write_output_files()
#-----------------------------
self.update_time( dt )
self.status = 'updated' # (OpenMI)
# update()
#-------------------------------------------------------------------
def finalize(self):
self.status = 'finalizing' # (OpenMI)
if (self.comp_status == 'Enabled'):
self.close_input_files() ## TopoFlow input "data streams"
if not(self.PRECIP_ONLY):
self.close_output_files()
self.status = 'finalized' # (OpenMI)
self.print_final_report(comp_name='Meteorology component')
# finalize()
#-------------------------------------------------------------------
def set_computed_input_vars(self):
#-----------------------------------------------
# Convert precip rate units from mm/h to m/s ?
#-----------------------------------------------
# NB! read_input_files() does this for files.
#-----------------------------------------------
if (self.P_type == 'Scalar'):
## print '######## self.P_type =', self.P_type
## print '######## type(self.P) =', type(self.P)
## print '######## self.P =', self.P
## print '######## Converting scalar P from MMPH to MPS.'
#-----------------------------------------------------
# (2/7/13) Must use "*=" here to preserve reference.
#-----------------------------------------------------
self.P *= self.mmph_to_mps
## self.P = self.P * self.mmph_to_mps
            print 'Scalar rainrate set to:', self.P, ' [m s-1]'
#---------------------------------
# Process the PRECIP_ONLY toggle
#---------------------------------
if not(hasattr(self, 'PRECIP_ONLY')):
self.PRECIP_ONLY = False
elif (self.PRECIP_ONLY.lower() == 'yes'):
self.PRECIP_ONLY = True
else:
self.PRECIP_ONLY = False
#---------------------------------------
# Print info message about PRECIP_ONLY
#---------------------------------------
if (self.PRECIP_ONLY):
print '-----------------------------------------'
print ' NOTE: Since PRECIP_ONLY = True, output'
print ' variables will not be computed'
print ' or saved to files.'
print '-----------------------------------------'
            print ' '
#----------------------------------------------------
# Toggle to use SATTERLUND or BRUTSAERT methods
# for computing e_air and em_air. (Not in GUI yet.)
#----------------------------------------------------
if not(hasattr(self, 'SATTERLUND')):
self.SATTERLUND = False
#---------------------------------------------
# Convert GMT_offset from string to int
# because GUI can't use ints in droplist yet
#---------------------------------------------
self.GMT_offset = np.int16( self.GMT_offset )
#------------------------------------------------
# Convert start_month from string to integer
# January should be 1. See solar.Julian_Day().
#------------------------------------------------
month_list = ['January', 'February', 'March', 'April',
'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December']
self.start_month = month_list.index( self.start_month ) + 1
#-------------------------------
# Initialize some more toggles
#-------------------------------
if not(hasattr(self, 'SAVE_QSW_GRIDS')):
self.SAVE_QSW_GRIDS = False
if not(hasattr(self, 'SAVE_QLW_GRIDS')):
self.SAVE_QLW_GRIDS = False
#-------------------------------------------
if not(hasattr(self, 'SAVE_QSW_PIXELS')):
self.SAVE_QSW_PIXELS = False
if not(hasattr(self, 'SAVE_QLW_PIXELS')):
self.SAVE_QLW_PIXELS = False
#---------------------------------------------------------
# Make sure that all "save_dts" are larger or equal to
# the specified process dt. There is no point in saving
# results more often than they change.
# Issue a message to this effect if any are smaller ??
#---------------------------------------------------------
self.save_grid_dt = np.maximum(self.save_grid_dt, self.dt)
self.save_pixels_dt = np.maximum(self.save_pixels_dt, self.dt)
# set_computed_input_vars()
#-------------------------------------------------------------------
def initialize_computed_vars(self):
#------------------------------------------------------
# Note: Some of these require "self.rti", which is
# only stored by read_grid_info() after the
# set_computed_input_vars() function is called.
# So these parts can't go there.
#------------------------------------------------------
#---------------------------------------
# Add self.in_directory to:
# slope_grid_file & aspect_grid_file
#---------------------------------------
self.slope_grid_file = (self.in_directory + self.slope_grid_file)
self.aspect_grid_file = (self.in_directory + self.aspect_grid_file)
#-------------------------------------------------
# Read slope grid & convert to slope angle, beta
# NB! RT slope grids have NaNs on edges.
#-------------------------------------------------
slopes = rtg_files.read_grid( self.slope_grid_file, self.rti,
RTG_type='FLOAT' )
beta = np.arctan( slopes )
beta = (self.twopi + beta) % self.twopi
#---------------------------------------------
w_nan = np.where( np.logical_not(np.isfinite(beta)) )
n_nan = np.size(w_nan[0])
if (n_nan != 0):
beta[ w_nan ] = np.float64(0)
#------------------------------------------------------------------
w_bad = np.where( np.logical_or( (beta < 0), (beta > np.pi / 2) ) )
n_bad = np.size(w_bad[0])
if (n_bad != 0):
            msg = np.array(['ERROR: Some slope angles are out of range.', ' '])
for line in msg:
print line
## result = GUI_Message(msg, INFO=True, TITLE='ERROR MESSAGE')
return
self.beta = beta ######
#------------------------------------------------------
# Read aspect grid. Alpha must be CW from north.
# NB! RT aspect grids have NaNs on edges.
#---------------------------------------------------------
# RT files ending in "_mf-angle.rtg" and "fd-aspect.rtg"
# contain aspect values. The former are in [0, 2 Pi]
# while the latter are in [-Pi, Pi] and both measure
# CCW from due east.
#---------------------------------------------------------
aspects = rtg_files.read_grid( self.aspect_grid_file, self.rti,
RTG_type='FLOAT' )
alpha = (np.pi / 2) - aspects
alpha = (self.twopi + alpha) % self.twopi
#-----------------------------------------------
w_nan = np.where( np.logical_not( np.isfinite(alpha) ) )
n_nan = np.size( w_nan[0] )
if (n_nan != 0):
alpha[ w_nan ] = np.float64(0)
self.alpha = alpha ######
#---------------------------
# Create lon and lat grids
#---------------------------
if (self.rti.pixel_geom == 0):
self.lon_deg = solar.Longitude_Grid( self.rti )
self.lat_deg = solar.Latitude_Grid( self.rti )
## print 'Lon grid ='
## print self.lon_deg
## print 'Lat grid ='
## print self.lat_deg
#-----------------------------
# Write grids to RTG files ?
#-----------------------------
## lon_file = (self.out_directory + self.site_prefix + '_lons.bin')
## rtg_files.write_grid( self.lon_deg, lon_file, self.rti )
## lat_file = (self.out_directory + self.site_prefix + '_lats.bin')
## rtg_files.write_grid( self.lat_deg, lat_file, self.rti )
else:
print 'SORRY: Cannot yet create lon and lat grids for'
print ' this DEM because it uses UTM coordinates.'
print ' Will use lat/lon for Denver, Colorado.'
print ' '
#--------------------------------------------
# For now, use scalar values for Denver, CO
#--------------------------------------------
self.lon_deg = np.float64( -104.9841667 )
self.lat_deg = np.float64( 39.7391667 )
## return
#-------------------------------------------------
# Initialize max precip rate with the first rate
#------------------------------------------------
# Note: Need this here because rate may be
# zero at the end of update_precip_rate()
#------------------------------------------------
# vol_P is used for mass balance check.
#------------------------------------------------
P_max = self.P.max() # (after read_input_files)
## self.P_max = self.P.max()
self.P_max = self.initialize_scalar( P_max, dtype='float64')
self.vol_P = self.initialize_scalar( 0, dtype='float64')
#----------------------------------------------------------
# For using new framework which embeds references from
# meteorology to snow, etc., these need to be defined
# in the initialize step. However, they will most likely
# change from scalar to grid during update, so we need to
# check that the reference isn't broken when the dtype
# changes. (5/17/12)
#----------------------------------------------------------
# These depend on grids alpha and beta, so will be grids.
#----------------------------------------------------------
self.Qn_SW = np.zeros([self.ny, self.nx], dtype='float64')
self.Qn_LW = np.zeros([self.ny, self.nx], dtype='float64')
self.Qn_tot = np.zeros([self.ny, self.nx], dtype='float64')
self.Q_sum = np.zeros([self.ny, self.nx], dtype='float64')
#----------------------------------------------------------
# self.Qn_SW = self.initialize_scalar( 0, dtype='float64')
# self.Qn_LW = self.initialize_scalar( 0, dtype='float64')
# self.Qn_tot = self.initialize_scalar( 0, dtype='float64')
# self.Q_sum = self.initialize_scalar( 0, dtype='float64')
#----------------------------------------------------------
# These may be scalars or grids.
#---------------------------------
self.Qe = self.initialize_scalar( 0, dtype='float64')
self.e_air = self.initialize_scalar( 0, dtype='float64')
self.e_surf = self.initialize_scalar( 0, dtype='float64')
self.em_air = self.initialize_scalar( 0, dtype='float64')
self.Qc = self.initialize_scalar( 0, dtype='float64')
self.Qa = self.initialize_scalar( 0, dtype='float64')
#------------------------------------
# Initialize the decimal Julian day
#------------------------------------
self.julian_day = solar.Julian_Day( self.start_month,
self.start_day,
self.start_hour )
## print ' julian_day =', self.julian_day
# initialize_computed_vars()
#-------------------------------------------------------------------
def update_P_integral(self):
#---------------------------------------------------
# Notes: This can be used for mass balance checks,
# such as now done by update_mass_totals()
# in topoflow.py. The "dt" here should be
# TopoFlow's "main dt" vs. the process dt.
# dV[i] = P[i] * da[i] * dt, dV = sum(dV[i])
#---------------------------------------------------
if (self.DEBUG):
print 'Calling update_P_integral()...'
#------------------------------------------------
# Update mass total for P, sum over all pixels
#------------------------------------------------
volume = np.double(self.P * self.da * self.dt) # [m^3]
if (np.size(volume) == 1):
self.vol_P += (volume * self.rti.n_pixels)
else:
self.vol_P += np.sum(volume)
# update_P_integral()
#-------------------------------------------------------------------
def update_P_max(self):
if (self.DEBUG):
print 'Calling update_P_max()...'
#-----------------------------------------
# Save the maximum precip. rate in [m/s]
#-------------------------------------------
# Must use "fill()" to preserve reference.
#-------------------------------------------
self.P_max.fill( np.maximum(self.P_max, self.P.max()) )
## self.P_max = np.maximum(self.P_max, self.P.max())
#---------------
# For testing
#--------------
## print '##### P =', self.P * 1000 * 3600 # (mmph)
## print '##### P_max =', self.P_max * 1000 * 3600 # (mmph)
# update_P_max()
#-------------------------------------------------------------------
def update_P_rain(self):
#-----------------------------------------------------------
# Note: This routine is written so that it doesn't matter
# whether P and T_air are grids or scalars.
# For scalars: 1.5 * True = 1.5, 1.5 * False = 0.
# Here are the possible combinations for checking.
#-----------------------------------------------------------
# P T_air P_rain
#----------------------------
# scalar scalar scalar
# scalar grid grid
# grid scalar grid
# grid grid grid
#----------------------------
if (self.DEBUG):
print 'Calling update_P_rain()...'
#-------------------------------------------------
# P_rain is the precip that falls as liquid that
# can contribute to runoff production.
#-------------------------------------------------
# P_rain is used by channel_base.update_R.
#-------------------------------------------------
P_rain = self.P * (self.T_air > 0)
self.update_var( 'P_rain', P_rain ) ## (2/14/17)
# if (np.ndim( self.P_rain ) == 0):
# self.P_rain.fill( P_rain ) #### (mutable scalar)
# else:
# self.P_rain[:] = P_rain
if (self.DEBUG):
if (self.P_rain.max() > 0):
print ' >> Rain is falling...'
#--------------
# For testing
#--------------
## print 'shape(P) =', shape(self.P)
## print 'shape(T_air) =', shape(self.T_air)
## print 'shape(P_rain) =', shape(self.P_rain)
## print 'T_air =', self.T_air
#########################################
#### Old note, to remember for later.
#--------------------------------------------------
# (2/7/13) We must use "*=" to preserve reference
# if P is a "mutable scalar".
#--------------------------------------------------
# update_P_rain()
#-------------------------------------------------------------------
def update_P_snow(self):
#----------------------------------------------------
# Notes: Rain and snow may fall simultaneously at
# different grid cells in the model domain.
#----------------------------------------------------
if (self.DEBUG):
print 'Calling update_P_snow()...'
#-------------------------------------------------
# P_snow is the precip that falls as snow or ice
# that contributes to the snow depth. This snow
# may melt to contribute to runoff later on.
#-------------------------------------------------
# P_snow is used by snow_base.update_depth.
#-------------------------------------------------
P_snow = self.P * (self.T_air <= 0)
self.update_var( 'P_snow', P_snow ) ## (2/14/17)
# if (np.ndim( self.P_snow ) == 0):
# self.P_snow.fill( P_snow ) #### (mutable scalar)
# else:
# self.P_snow[:] = P_snow
if (self.DEBUG):
if (self.P_snow.max() > 0):
print ' >> Snow is falling...'
# update_P_snow()
#-------------------------------------------------------------------
def update_bulk_richardson_number(self):
if (self.DEBUG):
print 'Calling update_bulk_richardson_number()...'
#---------------------------------------------------------------
# (9/6/14) Found a typo in the Zhang et al. (2000) paper,
# in the definition of Ri. Also see Price and Dunne (1976).
# We should have (Ri > 0) and (T_surf > T_air) when STABLE.
# This also removes problems/singularities in the corrections
# for the stable and unstable cases in the next function.
#---------------------------------------------------------------
# Notes: Other definitions are possible, such as the one given
# by Dingman (2002, p. 599). However, this one is the
# one given by Zhang et al. (2000) and is meant for use
# with the stability criterion also given there.
#---------------------------------------------------------------
#### top = self.g * self.z * (self.T_air - self.T_surf) # BUG.
top = self.g * self.z * (self.T_surf - self.T_air)
bot = (self.uz)**2.0 * (self.T_air + np.float64(273.15))
self.Ri = (top / bot)
# update_bulk_richardson_number()
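    #---------------------------------------------------------------
    # Example (illustrative numbers only, not from a test case):
    # with the sign convention used above, g = 9.81, z = 10 [m],
    # uz = 3 [m/s], T_surf = 5 [deg C] and T_air = 0 [deg C] give
    #     top = 9.81 * 10 * 5  = 490.5
    #     bot = 3^2 * 273.15   = 2458.35
    #     Ri  = top / bot      = 0.20 (approx.)
    # which is positive and is handled by the "stable" branch in
    # update_bulk_aero_conductance() below.
    #---------------------------------------------------------------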
#-------------------------------------------------------------------
def update_bulk_aero_conductance(self):
if (self.DEBUG):
print 'Calling update_bulk_aero_conductance()...'
#----------------------------------------------------------------
# Notes: Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat [m/s]
# De = bulk exchange coeff for vapor [m/s]
# h_snow = snow depth [m]
# z0_air = surface roughness length scale [m]
# (includes vegetation not covered by snow)
# z = height that has wind speed uz [m]
# uz = wind speed at height z [m/s]
# kappa = 0.408 = von Karman's constant [unitless]
# RI = Richardson's number (see function)
#----------------------------------------------------------------
h_snow = self.h_snow # (ref from new framework)
#---------------------------------------------------
# Compute bulk exchange coeffs (neutral stability)
        # using the logarithmic "law of the wall".
#-----------------------------------------------------
# Note that "arg" = the drag coefficient (unitless).
#-----------------------------------------------------
arg = self.kappa / np.log((self.z - h_snow) / self.z0_air)
Dn = self.uz * (arg)**2.0
#-----------------------------------------------
# NB! Dn could be a scalar or a grid, so this
# must be written to handle both cases.
# Note that WHERE can be used on a scalar:
# IDL> a = 1
# IDL> print, size(a)
# IDL> w = where(a ge 1, nw)
# IDL> print, nw
# IDL> a[w] = 2
# IDL> print, a
# IDL> print, size(a)
#-----------------------------------------------
###########################################################
# NB! If T_air and T_surf are both scalars, then next
# few lines won't work because we can't index the
# resulting empty "w" (even if T_air == T_surf).
###########################################################
## w = np.where(self.T_air != self.T_surf)
## nw = np.size(w[0])
## ## nw = np.size(w,0) # (doesn't work if 2 equal scalars)
#----------------------------------------------------------
T_AIR_SCALAR = (np.ndim( self.T_air ) == 0)
T_SURF_SCALAR = (np.ndim( self.T_surf ) == 0)
if (T_AIR_SCALAR and T_SURF_SCALAR):
if (self.T_air == self.T_surf): nw=1
else: nw=0
else:
w = np.where(self.T_air != self.T_surf)
nw = np.size(w[0])
if (nw == 0):
#--------------------------------------------
# All pixels are neutral. Set Dh = De = Dn.
#--------------------------------------------
self.Dn = Dn
self.Dh = Dn
self.De = Dn
return
#-------------------------------------
# One or more pixels are not neutral
# so make a correction using RI
#---------------------------------------------
# NB! RI could be a grid when Dn is a
# scalar, and this will change Dn to a grid.
#---------------------------------------------
# Ri = Richardson_Number(z, uz, T_air, T_surf)
#--------------------------------------------
# Before 12/21/07. Has bug if RI is a grid
#--------------------------------------------
# w_stable = where(*T_air gt *T_surf, n_stable)
# if (n_stable ne 0) then begin
# Dn[w_stable] = Dn[w_stable]/(1d + (10d * RI))
# endif
# w_unstable = where(*T_air lt *T_surf, n_unstable)
# if (n_unstable ne 0) then begin
#----------------------------------------------
        # Multiplication and subtraction vs. opposites
# for the stable case. Zhang et al. (2000)
# Hopefully not just a typo.
#----------------------------------------------
# Dn[w_unstable] = Dn[w_unstable]*(1d - (10d * self.Ri))
# endif
#-----------------
# After 12/21/07
#------------------------------------------------------------
# If T_air, T_surf or uz is a grid, then Ri will be a grid.
# This version makes only one call to WHERE, so its faster.
#------------------------------------------------------------
        # Multiplication and subtraction vs. opposites for the
# stable case (Zhang et al., 2000); hopefully not a typo.
# It plots as a smooth curve through Ri=0.
#------------------------------------------------------------
# (9/7/14) Modified so that Dn is saved, but Dh = De.
#------------------------------------------------------------
Dh = Dn.copy() ### (9/7/14. Save Dn also.)
nD = np.size( Dh )
nR = np.size( self.Ri )
if (nR > 1):
#--------------------------
# Case where RI is a grid
#--------------------------
ws = np.where( self.Ri > 0 )
ns = np.size( ws[0] )
wu = np.where( np.invert(self.Ri > 0) )
nu = np.size( wu[0] )
if (nD == 1):
#******************************************
# Convert Dn to a grid here or somewhere
# Should stop with an error message
#******************************************
dum = np.int16(0)
if (ns != 0):
#----------------------------------------------------------
# If (Ri > 0), or (T_surf > T_air), then STABLE. (9/6/14)
#----------------------------------------------------------
Dh[ws] = Dh[ws] / (np.float64(1) + (np.float64(10) * self.Ri[ws]))
if (nu != 0):
Dh[wu] = Dh[wu] * (np.float64(1) - (np.float64(10) * self.Ri[wu]))
else:
#----------------------------
# Case where Ri is a scalar
#--------------------------------
# Works if Dh is grid or scalar
#--------------------------------
if (self.Ri > 0):
Dh = Dh / (np.float64(1) + (np.float64(10) * self.Ri))
else:
Dh = Dh * (np.float64(1) - (np.float64(10) * self.Ri))
#----------------------------------------------------
# NB! We currently assume that these are all equal.
#----------------------------------------------------
self.Dn = Dn
self.Dh = Dh
self.De = Dh ## (assumed equal)
# update_bulk_aero_conductance()
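    #------------------------------------------------------------------
    # Rough sanity check (illustrative values, not model defaults):
    # for z = 10 [m], h_snow = 0 [m], z0_air = 0.02 [m], kappa = 0.41
    # and uz = 5 [m/s], the neutral-stability coefficient is
    #     arg = 0.41 / ln(10 / 0.02) = 0.41 / 6.215 = 0.066
    #     Dn  = 5 * 0.066^2          = 0.022 [m/s]
    # and a Richardson number of Ri = 0.1 would reduce Dh and De to
    # Dn / (1 + 10 * 0.1) = Dn / 2.
    #------------------------------------------------------------------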
#-------------------------------------------------------------------
def update_sensible_heat_flux(self):
#--------------------------------------------------------
# Notes: All the Q's have units of W/m^2 = J/(m^2 s).
# Dh is returned by Bulk_Exchange_Coeff function
# and is not a pointer.
#--------------------------------------------------------
if (self.DEBUG):
            print 'Calling update_sensible_heat_flux()...'
#---------------------
# Physical constants
#---------------------
# rho_air = 1.225d ;[kg m-3, at sea-level]
# Cp_air = 1005.7 ;[J kg-1 K-1]
#-----------------------------
# Compute sensible heat flux
#-----------------------------
delta_T = (self.T_air - self.T_surf)
self.Qh = (self.rho_air * self.Cp_air) * self.Dh * delta_T
# update_sensible_heat_flux()
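    #------------------------------------------------------------------
    # Rough magnitude check (illustrative only): using the constants
    # quoted above, rho_air = 1.225 [kg m-3] and Cp_air = 1005.7
    # [J kg-1 K-1], with Dh = 0.02 [m/s] and delta_T = 5 [deg C],
    #     Qh = 1.225 * 1005.7 * 0.02 * 5 = 123 [W m-2] (approx.)
    # so Qh is positive when the air is warmer than the surface.
    #------------------------------------------------------------------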
#-------------------------------------------------------------------
def update_saturation_vapor_pressure(self, MBAR=False,
SURFACE=False):
if (self.DEBUG):
print 'Calling update_saturation_vapor_pressure()...'
#----------------------------------------------------------------
#Notes: Saturation vapor pressure is a function of temperature.
# T is temperature in Celsius. By default, the method
        #        of Brutsaert (1975) is used.  However, if the SATTERLUND
        #        keyword is set, then the method of Satterlund (1979) is
# used. When plotted, they look almost identical. See
# the Compare_em_air_Method routine in Qnet_file.pro.
# Dingman (2002) uses the Brutsaert method.
# Liston (1995, EnBal) uses the Satterlund method.
# By default, the result is returned with units of kPa.
# Set the MBAR keyword for units of millibars.
# 100 kPa = 1 bar = 1000 mbars
# => 1 kPa = 10 mbars
#----------------------------------------------------------------
#NB! Here, 237.3 is correct, and not a misprint of 273.2.
# See footnote on p. 586 in Dingman (Appendix D).
#----------------------------------------------------------------
if (SURFACE):
## if (self.T_surf_type in ['Scalar', 'Grid']):
## return
T = self.T_surf
else:
## if (self.T_air_type in ['Scalar', 'Grid']):
## return
T = self.T_air
if not(self.SATTERLUND):
#------------------------------
# Use Brutsaert (1975) method
#------------------------------
term1 = (np.float64(17.3) * T) / (T + np.float64(237.3))
e_sat = np.float64(0.611) * np.exp(term1) # [kPa]
else:
#-------------------------------
# Use Satterlund (1979) method ############ DOUBLE CHECK THIS (7/26/13)
#-------------------------------
term1 = np.float64(2353) / (T + np.float64(273.15))
e_sat = np.float64(10) ** (np.float64(11.4) - term1) # [Pa]
e_sat = (e_sat / np.float64(1000)) # [kPa]
#-----------------------------------
# Convert units from kPa to mbars?
#-----------------------------------
if (MBAR):
e_sat = (e_sat * np.float64(10)) # [mbar]
if (SURFACE):
self.e_sat_surf = e_sat
else:
self.e_sat_air = e_sat
# update_saturation_vapor_pressure()
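    #------------------------------------------------------------------
    # Quick check of the Brutsaert branch (illustrative only):
    # for T = 20 [deg C],
    #     term1 = (17.3 * 20) / (20 + 237.3) = 1.345
    #     e_sat = 0.611 * exp(1.345)         = 2.34 [kPa] = 23.4 [mbar]
    # which agrees closely with tabulated values near 20 deg C.
    #------------------------------------------------------------------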
#-------------------------------------------------------------------
def update_vapor_pressure(self, MBAR=False,
SURFACE=False):
if (self.DEBUG):
print 'Calling update_vapor_pressure()...'
#---------------------------------------------------
# Notes: T is temperature in Celsius
# RH = relative humidity, in [0,1]
# by definition, it equals (e / e_sat)
# e has units of kPa.
#---------------------------------------------------
if (SURFACE):
## if (self.T_surf_type in ['Scalar', 'Grid']) and \
## (self.RH_type in ['Scalar', 'Grid']):
## return
e_sat = self.e_sat_surf
else:
## if (self.T_air_type in ['Scalar', 'Grid']) and \
## (self.RH_type in ['Scalar', 'Grid']):
## return
e_sat = self.e_sat_air
e = (self.RH * e_sat)
#-----------------------------------
# Convert units from kPa to mbars?
#-----------------------------------
if (MBAR):
e = (e * np.float64(10)) # [mbar]
if (SURFACE):
self.e_surf = e
else:
self.e_air = e
# update_vapor_pressure()
#-------------------------------------------------------------------
def update_dew_point(self):
if (self.DEBUG):
print 'Calling update_dew_point()...'
#-----------------------------------------------------------
# Notes: The dew point is a temperature in degrees C and
# is a function of the vapor pressure, e_air.
# Vapor pressure is a function of air temperature,
# T_air, and relative humidity, RH.
# The formula used here needs e_air in kPa units.
# See Dingman (2002, Appendix D, p. 587).
#-----------------------------------------------------------
e_air_kPa = self.e_air / np.float64(10) # [kPa]
log_vp = np.log( e_air_kPa )
top = log_vp + np.float64(0.4926)
bot = np.float64(0.0708) - (np.float64(0.00421) * log_vp)
self.T_dew = (top / bot) # [degrees C]
# update_dew_point()
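    #------------------------------------------------------------------
    # Consistency check (illustrative only): at saturation (RH = 1)
    # and T_air = 20 [deg C], e_air is about 23.4 [mbar], so
    #     log_vp = ln(2.34) = 0.85
    #     T_dew  = (0.85 + 0.4926) / (0.0708 - 0.00421 * 0.85)
    #            = 1.343 / 0.0672 = 20 [deg C] (approx.)
    # i.e. the dew point equals the air temperature, as expected.
    #------------------------------------------------------------------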
#-------------------------------------------------------------------
def update_precipitable_water_content(self):
if (self.DEBUG):
print 'Calling update_precipitable_water_content()...'
#------------------------------------------------------------
# Notes: W_p is precipitable water content in centimeters,
# which depends on air temp and relative humidity.
#------------------------------------------------------------
arg = np.float64( 0.0614 * self.T_dew )
self.W_p = np.float64(1.12) * np.exp( arg ) # [cm]
# update_precipitable_water_content()
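    #------------------------------------------------------------------
    # Example (illustrative only): T_dew = 20 [deg C] gives
    #     W_p = 1.12 * exp(0.0614 * 20) = 1.12 * 3.41 = 3.8 [cm]
    # a plausible precipitable water content for warm, humid air.
    #------------------------------------------------------------------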
#-------------------------------------------------------------------
def update_latent_heat_flux(self):
if (self.DEBUG):
print 'Calling update_latent_heat_flux()...'
#--------------------------------------------------------
# Notes: Pressure units cancel out because e_air and
# e_surf (in numer) have same units (mbar) as
# p0 (in denom).
#--------------------------------------------------------
# According to Dingman (2002, p. 273), constant should
# be 0.622 instead of 0.662 (Zhang et al., 2000).
#--------------------------------------------------------
const = self.latent_heat_constant
factor = (self.rho_air * self.Lv * self.De)
delta_e = (self.e_air - self.e_surf)
self.Qe = factor * delta_e * (const / self.p0)
# update_latent_heat_flux()
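    #------------------------------------------------------------------
    # Rough magnitude check (illustrative values only): with
    # rho_air = 1.225 [kg m-3], Lv = 2.5e6 [J kg-1], De = 0.02 [m/s],
    # p0 = 1000 [mbar], latent_heat_constant = 0.622 and
    # (e_air - e_surf) = -2 [mbar],
    #     Qe = 1.225 * 2.5e6 * 0.02 * (-2) * 0.622 / 1000 = -76 [W m-2]
    # i.e. a latent heat loss from the surface when the surface vapor
    # pressure exceeds that of the air.
    #------------------------------------------------------------------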
#-------------------------------------------------------------------
def update_conduction_heat_flux(self):
if (self.DEBUG):
print 'Calling update_conduction_heat_flux()...'
#-----------------------------------------------------------------
# Notes: The conduction heat flux from snow to soil for computing
# snowmelt energy, Qm, is close to zero.
# However, the conduction heat flux from surface and sub-
# surface for computing Qet is given by Fourier's Law,
# namely Qc = Ks(Tx - Ts)/x.
# All the Q's have units of W/m^2 = J/(m^2 s).
#-----------------------------------------------------------------
pass # (initialized at start)
# update_conduction_heat_flux()
#-------------------------------------------------------------------
def update_advection_heat_flux(self):
if (self.DEBUG):
print 'Calling update_advection_heat_flux()...'
#------------------------------------------------------
# Notes: All the Q's have units of W/m^2 = J/(m^2 s).
#------------------------------------------------------
pass # (initialized at start)
# update_advection_heat_flux()
#-------------------------------------------------------------------
def update_julian_day(self):
if (self.DEBUG):
print 'Calling update_julian_day()...'
#----------------------------------
# Update the *decimal* Julian day
#----------------------------------
self.julian_day += (self.dt / self.secs_per_day) # [days]
#------------------------------------------
# Compute the offset from True Solar Noon
# clock_hour is in 24-hour military time
# but it can have a decimal part.
#------------------------------------------
dec_part = self.julian_day - np.int16(self.julian_day)
clock_hour = dec_part * self.hours_per_day
## print ' Computing solar_noon...'
solar_noon = solar.True_Solar_Noon( self.julian_day,
self.lon_deg,
self.GMT_offset )
## print ' Computing TSN_offset...'
self.TSN_offset = (clock_hour - solar_noon) # [hours]
# update_julian_day()
#-------------------------------------------------------------------
def update_net_shortwave_radiation(self):
#---------------------------------------------------------
# Notes: If time is before local sunrise or after local
# sunset then Qn_SW should be zero.
#---------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_shortwave_radiation()...'
#---------------------------------------
# Compute Qn_SW for this time [W m-2]
#---------------------------------------
Qn_SW = solar.Clear_Sky_Radiation( self.lat_deg,
self.julian_day,
self.W_p,
self.TSN_offset,
self.alpha,
self.beta,
self.albedo,
self.dust_atten )
self.update_var( 'Qn_SW', Qn_SW ) ## (2/14/17)
# if (np.ndim( self.Qn_SW ) == 0):
# self.Qn_SW.fill( Qn_SW ) #### (mutable scalar)
# else:
# self.Qn_SW[:] = Qn_SW # [W m-2]
# update_net_shortwave_radiation()
#-------------------------------------------------------------------
def update_em_air(self):
if (self.DEBUG):
print 'Calling update_em_air()...'
#---------------------------------------------------------
# NB! The Brutsaert and Satterlund formulas for air
# emissivity as a function of air temperature are in
# close agreement; see compare_em_air_methods().
# However, we must pay close attention to whether
# equations require units of kPa, Pa, or mbar.
#
# 100 kPa = 1 bar = 1000 mbars
# => 1 kPa = 10 mbars
#---------------------------------------------------------
# NB! Temperatures are assumed to be given with units
# of degrees Celsius and are converted to Kelvin
# wherever necessary by adding C_to_K = 273.15.
#
# RH = relative humidity [unitless]
#---------------------------------------------------------
# NB! I'm not sure about how F is added at end because
# of how the equation is printed in Dingman (2002).
# But it reduces to other formulas as it should.
#---------------------------------------------------------
T_air_K = self.T_air + self.C_to_K
if not(self.SATTERLUND):
#-----------------------------------------------------
# Brutsaert (1975) method for computing emissivity
# of the air, em_air. This formula uses e_air with
# units of kPa. (From Dingman (2002, p. 196).)
# See notes for update_vapor_pressure().
#-----------------------------------------------------
e_air_kPa = self.e_air / np.float64(10) # [kPa]
F = self.canopy_factor
C = self.cloud_factor
term1 = (1.0 - F) * 1.72 * (e_air_kPa / T_air_K) ** self.one_seventh
term2 = (1.0 + (0.22 * C ** 2.0))
self.em_air = (term1 * term2) + F
else:
#--------------------------------------------------------
# Satterlund (1979) method for computing the emissivity
# of the air, em_air, that is intended to "correct
# apparent deficiencies in this formulation at air
# temperatures below 0 degrees C" (see G. Liston)
# Liston cites Aase and Idso(1978), Satterlund (1979)
#--------------------------------------------------------
e_air_mbar = self.e_air
eterm = np.exp(-1 * (e_air_mbar)**(T_air_K / 2016) )
self.em_air = 1.08 * (1.0 - eterm)
#--------------------------------------------------------------
# Can't do this yet. em_air is always initialized scalar now
# but may change to grid on assignment. (9/23/14)
#--------------------------------------------------------------
# if (np.ndim( self.em_air ) == 0):
# self.em_air.fill( em_air ) #### (mutable scalar)
# else:
# self.em_air[:] = em_air
# update_em_air()
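    #------------------------------------------------------------------
    # Rough check of the Brutsaert branch (illustrative values only):
    # with F = 0, C = 0, T_air = 20 [deg C] and e_air = 1.17 [kPa]
    # (about RH = 0.5),
    #     em_air = 1.72 * (1.17 / 293.15)^(1/7) = 1.72 * 0.454 = 0.78
    # which falls in the usual clear-sky range of roughly 0.7 to 0.85.
    #------------------------------------------------------------------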
#-------------------------------------------------------------------
def update_net_longwave_radiation(self):
#----------------------------------------------------------------
# Notes: Net longwave radiation is computed using the
# Stefan-Boltzman law. All four data types
# should be allowed (scalar, time series, grid or
# grid stack).
#
# Qn_LW = (LW_in - LW_out)
# LW_in = em_air * sigma * (T_air + 273.15)^4
# LW_out = em_surf * sigma * (T_surf + 273.15)^4
#
# Temperatures in [deg_C] must be converted to
# [K]. Recall that absolute zero occurs at
# 0 [deg_K] or -273.15 [deg_C].
#
#----------------------------------------------------------------
# First, e_air is computed as:
# e_air = RH * 0.611 * exp[(17.3 * T_air) / (T_air + 237.3)]
# Then, em_air is computed as:
# em_air = (1 - F) * 1.72 * [e_air / (T_air + 273.15)]^(1/7) *
# (1 + 0.22 * C^2) + F
#----------------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_longwave_radiation()...'
#--------------------------------
# Compute Qn_LW for this time
#--------------------------------
T_air_K = self.T_air + self.C_to_K
T_surf_K = self.T_surf + self.C_to_K
LW_in = self.em_air * self.sigma * (T_air_K)** 4.0
LW_out = self.em_surf * self.sigma * (T_surf_K)** 4.0
LW_out = LW_out + ((1.0 - self.em_surf) * LW_in)
self.Qn_LW = (LW_in - LW_out) # [W m-2]
#--------------------------------------------------------------
# Can't do this yet. Qn_LW is always initialized grid now
# but will often be created above as a scalar. (9/23/14)
#--------------------------------------------------------------
# if (np.ndim( self.Qn_LW ) == 0):
# self.Qn_LW.fill( Qn_LW ) #### (mutable scalar)
# else:
# self.Qn_LW[:] = Qn_LW # [W m-2]
# update_net_longwave_radiation()
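    #------------------------------------------------------------------
    # Rough magnitude check (illustrative values only): with
    # em_air = 0.8, T_air = 10 [deg C], em_surf = 0.98 and
    # T_surf = 5 [deg C],
    #     LW_in  = 0.8  * 5.67e-8 * 283.15^4                = 292 [W m-2]
    #     LW_out = 0.98 * 5.67e-8 * 278.15^4 + 0.02 * LW_in = 338 [W m-2]
    #     Qn_LW  = 292 - 338                                = -46 [W m-2]
    # i.e. a modest net longwave loss from the surface.
    #------------------------------------------------------------------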
#-------------------------------------------------------------------
def update_net_total_radiation(self):
#-----------------------------------------------
# Notes: Added this on 9/11/14. Not used yet.
#------------------------------------------------------------
# Qn_SW = net shortwave radiation flux (solar)
# Qn_LW = net longwave radiation flux (air, surface)
#------------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_total_radiation()...'
Qn_tot = self.Qn_SW + self.Qn_LW # [W m-2]
self.update_var( 'Qn_tot', Qn_tot ) ## (2/14/17)
# if (np.ndim( self.Qn_tot ) == 0):
# self.Qn_tot.fill( Qn_tot ) #### (mutable scalar)
# else:
# self.Qn_tot[:] = Qn_tot # [W m-2]
# update_net_total_radiation()
#-------------------------------------------------------------------
def update_net_energy_flux(self):
if (self.DEBUG):
print 'Calling update_net_energy_flux()...'
#------------------------------------------------------
# Notes: Q_sum is used by "snow_energy_balance.py".
#------------------------------------------------------
# Qm = energy used to melt snowpack (if > 0)
# Qn_SW = net shortwave radiation flux (solar)
# Qn_LW = net longwave radiation flux (air, surface)
# Qh = sensible heat flux from turbulent convection
# between snow surface and air
# Qe = latent heat flux from evaporation, sublimation,
# and condensation
# Qa = energy advected by moving water (i.e. rainfall)
# (ARHYTHM assumes this to be negligible; Qa=0.)
# Qc = energy flux via conduction from snow to soil
# (ARHYTHM assumes this to be negligible; Qc=0.)
# Ecc = cold content of snowpack = amount of energy
# needed before snow can begin to melt [J m-2]
# All Q's here have units of [W m-2].
# Are they all treated as positive quantities ?
# rho_air = density of air [kg m-3]
# rho_snow = density of snow [kg m-3]
# Cp_air = specific heat of air [J kg-1 K-1]
# Cp_snow = heat capacity of snow [J kg-1 K-1]
# = ???????? = specific heat of snow
# Kh = eddy diffusivity for heat [m2 s-1]
# Ke = eddy diffusivity for water vapor [m2 s-1]
# Lv = latent heat of vaporization [J kg-1]
# Lf = latent heat of fusion [J kg-1]
# ------------------------------------------------------
# Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat
# De = bulk exchange coeff for vapor
# ------------------------------------------------------
# T_air = air temperature [deg_C]
# T_surf = surface temperature [deg_C]
# T_snow = average snow temperature [deg_C]
# RH = relative humidity [unitless] (in [0,1])
# e_air = air vapor pressure at height z [mbar]
# e_surf = surface vapor pressure [mbar]
# ------------------------------------------------------
# h_snow = snow depth [m]
# z = height where wind speed is uz [m]
# uz = wind speed at height z [m/s]
# p0 = atmospheric pressure [mbar]
# T0 = snow temperature when isothermal [deg_C]
# (This is usually 0.)
# z0_air = surface roughness length scale [m]
# (includes vegetation not covered by snow)
# (Values from page 1033: 0.0013, 0.02 [m])
# kappa = von Karman's constant [unitless] = 0.41
# dt = snowmelt timestep [seconds]
#----------------------------------------------------------------
Q_sum = self.Qn_SW + self.Qn_LW + self.Qh + \
self.Qe + self.Qa + self.Qc # [W m-2]
self.update_var( 'Q_sum', Q_sum ) ## (2/14/17)
# if (np.ndim( self.Q_sum) == 0):
# self.Q_sum.fill( Q_sum ) #### (mutable scalar)
# else:
# self.Q_sum[:] = Q_sum # [W m-2]
# update_net_energy_flux()
#-------------------------------------------------------------------
def open_input_files(self):
if (self.DEBUG):
print 'Calling open_input_files()...'
self.P_file = self.in_directory + self.P_file
self.T_air_file = self.in_directory + self.T_air_file
self.T_surf_file = self.in_directory + self.T_surf_file
self.RH_file = self.in_directory + self.RH_file
self.p0_file = self.in_directory + self.p0_file
self.uz_file = self.in_directory + self.uz_file
self.z_file = self.in_directory + self.z_file
self.z0_air_file = self.in_directory + self.z0_air_file
self.albedo_file = self.in_directory + self.albedo_file
self.em_surf_file = self.in_directory + self.em_surf_file
self.dust_atten_file = self.in_directory + self.dust_atten_file
self.cloud_factor_file = self.in_directory + self.cloud_factor_file
self.canopy_factor_file = self.in_directory + self.canopy_factor_file
self.P_unit = model_input.open_file(self.P_type, self.P_file)
self.T_air_unit = model_input.open_file(self.T_air_type, self.T_air_file)
self.T_surf_unit = model_input.open_file(self.T_surf_type, self.T_surf_file)
self.RH_unit = model_input.open_file(self.RH_type, self.RH_file)
self.p0_unit = model_input.open_file(self.p0_type, self.p0_file)
self.uz_unit = model_input.open_file(self.uz_type, self.uz_file)
self.z_unit = model_input.open_file(self.z_type, self.z_file)
self.z0_air_unit = model_input.open_file(self.z0_air_type, self.z0_air_file)
#-----------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#-----------------------------------------------
self.albedo_unit = model_input.open_file(self.albedo_type,
self.albedo_file)
self.em_surf_unit = model_input.open_file(self.em_surf_type,
self.em_surf_file)
self.dust_atten_unit = model_input.open_file(self.dust_atten_type,
self.dust_atten_file)
self.cloud_factor_unit = model_input.open_file(self.cloud_factor_type,
self.cloud_factor_file)
self.canopy_factor_unit = model_input.open_file(self.canopy_factor_type,
self.canopy_factor_file)
#----------------------------------------------------------------------------
# Note: GMT_offset plus slope and aspect grids will be read separately.
#----------------------------------------------------------------------------
## self.Qn_SW_unit = model_input.open_file(self.Qn_SW_type, self.Qn_SW_file)
## self.Qn_LW_unit = model_input.open_file(self.Qn_LW_type, self.Qn_LW_file)
# open_input_files()
#-------------------------------------------------------------------
def read_input_files(self):
if (self.DEBUG):
print 'Calling read_input_files()...'
rti = self.rti
#--------------------------------------------------------
# All grids are assumed to have a data type of Float32.
#--------------------------------------------------------
# NB! read_next() returns None if TYPE arg is "Scalar".
#--------------------------------------------------------
P = model_input.read_next(self.P_unit, self.P_type, rti,
factor=self.mmph_to_mps)
## print '######### self.P_type = ' + self.P_type
## print '######### np.ndim( P ) = ' + str(np.ndim(P))
if (P is not None):
## print 'MET: (time,P) =', self.time, P
self.update_var( 'P', P ) ### 11/15/16
## if (self.P_type.lower() != 'scalar'):
# if (np.ndim( self.P ) == 0):
# self.P.fill( P ) #### (2/7/13, mutable scalar)
# else:
# self.P = P
if (self.DEBUG or (self.time_index == 0)):
print 'In read_input_files():'
print ' min(P) =', P.min() * self.mps_to_mmph, ' [mmph]'
print ' max(P) =', P.max() * self.mps_to_mmph, ' [mmph]'
print ' '
else:
#-----------------------------------------------
# Either self.P_type is "Scalar" or we've read
# all of the data in the rain_rates file.
#-----------------------------------------------
if (self.P_type.lower() != 'scalar'):
#------------------------------------
# Precip is unique in this respect.
#--------------------------------------------------
# 2/7/13. Note that we don't change P from grid
# to scalar since that could cause trouble for
# other comps that use P, so we just zero it out.
#--------------------------------------------------
self.P.fill( 0 )
if (self.DEBUG):
print 'Reached end of file:', self.P_file
print ' P set to 0 by read_input_files().'
elif (self.time_sec >= self.dt):
self.P.fill( 0 )
if (self.DEBUG):
print 'Reached end of scalar rainfall duration.'
print ' P set to 0 by read_input_files().'
## print 'time_sec =', self.time_sec
## print 'met dt =', self.dt
## print '######### In met_base.read_input_files() #######'
## print 'self.P_type =', self.P_type
## print 'self.P =', self.P
#------------------------------------------------------------
# Read variables from files into scalars or grids while
# making sure to preserve references (in-place). (11/15/16)
#------------------------------------------------------------
model_input.read_next2(self, 'T_air', rti)
model_input.read_next2(self, 'T_surf', rti)
model_input.read_next2(self, 'RH', rti)
model_input.read_next2(self, 'p0', rti)
model_input.read_next2(self, 'uz', rti)
model_input.read_next2(self, 'z', rti)
model_input.read_next2(self, 'z0_air', rti)
#----------------------------------------------------
model_input.read_next2(self, 'albedo', rti)
model_input.read_next2(self, 'em_surf', rti)
model_input.read_next2(self, 'dust_atten', rti)
model_input.read_next2(self, 'cloud_factor', rti)
model_input.read_next2(self, 'canopy_factor', rti)
###############################################################
# If any of these are scalars (read from a time series file)
# then we'll need to use "fill()" method to prevent breaking
# the reference to the "mutable scalar". (2/7/13)
###############################################################
T_air = model_input.read_next(self.T_air_unit, self.T_air_type, rti)
self.update_var( 'T_air', T_air )
# if (T_air is not None): self.T_air = T_air
T_surf = model_input.read_next(self.T_surf_unit, self.T_surf_type, rti)
self.update_var( 'T_surf', T_surf )
# if (T_surf is not None): self.T_surf = T_surf
RH = model_input.read_next(self.RH_unit, self.RH_type, rti)
self.update_var( 'RH', RH )
# if (RH is not None): self.RH = RH
p0 = model_input.read_next(self.p0_unit, self.p0_type, rti)
self.update_var( 'p0', p0 )
# if (p0 is not None): self.p0 = p0
uz = model_input.read_next(self.uz_unit, self.uz_type, rti)
self.update_var( 'uz', uz )
# if (uz is not None): self.uz = uz
z = model_input.read_next(self.z_unit, self.z_type, rti)
self.update_var( 'z', z )
# if (z is not None): self.z = z
z0_air = model_input.read_next(self.z0_air_unit, self.z0_air_type, rti)
self.update_var( 'z0_air', z0_air )
# if (z0_air is not None): self.z0_air = z0_air
#----------------------------------------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#----------------------------------------------------------------------------
# Note: We could later write a version of read_next() that takes "self"
# and "var_name" as args and that uses "exec()".
#----------------------------------------------------------------------------
albedo = model_input.read_next(self.albedo_unit, self.albedo_type, rti)
if (albedo is not None): self.albedo = albedo
em_surf = model_input.read_next(self.em_surf_unit, self.em_surf_type, rti)
if (em_surf is not None): self.em_surf = em_surf
dust_atten = model_input.read_next(self.dust_atten_unit, self.dust_atten_type, rti)
if (dust_atten is not None): self.dust_atten = dust_atten
cloud_factor = model_input.read_next(self.cloud_factor_unit, self.cloud_factor_type, rti)
if (cloud_factor is not None): self.cloud_factor = cloud_factor
canopy_factor = model_input.read_next(self.canopy_factor_unit, self.canopy_factor_type, rti)
if (canopy_factor is not None): self.canopy_factor = canopy_factor
#-------------------------------------------------------------
# Compute Qsw_prefactor from cloud_factor and canopy factor.
#-------------------------------------------------------------
## self.Qsw_prefactor =
#-------------------------------------------------------------
# These are currently treated as input data, but are usually
# generated by functions in Qnet_file.py. Later on, we'll
# provide the option to compute them "on the fly" with new
# functions called "update_net_shortwave_radiation()" and
# "update_net_longwave_radiation()", called from update().
#-------------------------------------------------------------
## Qn_SW = model_input.read_next(self.Qn_SW_unit, self.Qn_SW_type, rti)
## if (Qn_SW is not None): self.Qn_SW = Qn_SW
##
## Qn_LW = model_input.read_next(self.Qn_LW_unit, self.Qn_LW_type, rti)
## if (Qn_LW is not None): self.Qn_LW = Qn_LW
# read_input_files()
#-------------------------------------------------------------------
def close_input_files(self):
if (self.DEBUG):
print 'Calling close_input_files()...'
if (self.P_type != 'Scalar'): self.P_unit.close()
if (self.T_air_type != 'Scalar'): self.T_air_unit.close()
if (self.T_surf_type != 'Scalar'): self.T_surf_unit.close()
if (self.RH_type != 'Scalar'): self.RH_unit.close()
if (self.p0_type != 'Scalar'): self.p0_unit.close()
if (self.uz_type != 'Scalar'): self.uz_unit.close()
if (self.z_type != 'Scalar'): self.z_unit.close()
if (self.z0_air_type != 'Scalar'): self.z0_air_unit.close()
#---------------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#---------------------------------------------------
if (self.albedo_type != 'Scalar'): self.albedo_unit.close()
if (self.em_surf_type != 'Scalar'): self.em_surf_unit.close()
if (self.dust_atten_type != 'Scalar'): self.dust_atten_unit.close()
if (self.cloud_factor_type != 'Scalar'): self.cloud_factor_unit.close()
if (self.canopy_factor_type != 'Scalar'): self.canopy_factor_unit.close()
## if (self.Qn_SW_type != 'Scalar'): self.Qn_SW_unit.close()
## if (self.Qn_LW_type != 'Scalar'): self.Qn_LW_unit.close()
## if (self.P_file != ''): self.P_unit.close()
## if (self.T_air_file != ''): self.T_air_unit.close()
## if (self.T_surf_file != ''): self.T_surf_unit.close()
## if (self.RH_file != ''): self.RH_unit.close()
## if (self.p0_file != ''): self.p0_unit.close()
## if (self.uz_file != ''): self.uz_unit.close()
## if (self.z_file != ''): self.z_unit.close()
## if (self.z0_air_file != ''): self.z0_air_unit.close()
## #--------------------------------------------------------
## if (self.Qn_SW_file != ''): self.Qn_SW_unit.close()
## if (self.Qn_LW_file != ''): self.Qn_LW_unit.close()
# close_input_files()
#-------------------------------------------------------------------
def update_outfile_names(self):
if (self.DEBUG):
print 'Calling update_outfile_names()...'
#-------------------------------------------------
# Notes: Append out_directory to outfile names.
#-------------------------------------------------
self.ea_gs_file = (self.out_directory + self.ea_gs_file )
self.es_gs_file = (self.out_directory + self.es_gs_file )
self.Qsw_gs_file = (self.out_directory + self.Qsw_gs_file )
self.Qlw_gs_file = (self.out_directory + self.Qlw_gs_file )
self.ema_gs_file = (self.out_directory + self.ema_gs_file )
#------------------------------------------------------------
self.ea_ts_file = (self.out_directory + self.ea_ts_file )
self.es_ts_file = (self.out_directory + self.es_ts_file )
self.Qsw_ts_file = (self.out_directory + self.Qsw_ts_file )
self.Qlw_ts_file = (self.out_directory + self.Qlw_ts_file )
self.ema_ts_file = (self.out_directory + self.ema_ts_file )
## self.ea_gs_file = (self.case_prefix + '_2D-ea.rts')
## self.es_gs_file = (self.case_prefix + '_2D-es.rts')
## #-----------------------------------------------------
## self.ea_ts_file = (self.case_prefix + '_0D-ea.txt')
## self.es_ts_file = (self.case_prefix + '_0D-es.txt')
# update_outfile_names()
#-------------------------------------------------------------------
def open_output_files(self):
if (self.DEBUG):
print 'Calling open_output_files()...'
model_output.check_netcdf()
self.update_outfile_names()
#--------------------------------------
# Open new files to write grid stacks
#--------------------------------------
if (self.SAVE_EA_GRIDS):
model_output.open_new_gs_file( self, self.ea_gs_file, self.rti,
## var_name='e_air',
var_name='ea',
long_name='vapor_pressure_in_air',
units_name='mbar')
if (self.SAVE_ES_GRIDS):
model_output.open_new_gs_file( self, self.es_gs_file, self.rti,
## var_name='e_surf',
var_name='es',
long_name='vapor_pressure_at_surface',
units_name='mbar')
if (self.SAVE_QSW_GRIDS):
model_output.open_new_gs_file( self, self.Qsw_gs_file, self.rti,
var_name='Qsw',
long_name='net_shortwave_radiation',
units_name='W/m^2')
if (self.SAVE_QLW_GRIDS):
model_output.open_new_gs_file( self, self.Qlw_gs_file, self.rti,
var_name='Qlw',
long_name='net_longwave_radiation',
units_name='W/m^2')
if (self.SAVE_EMA_GRIDS):
model_output.open_new_gs_file( self, self.ema_gs_file, self.rti,
var_name='ema',
long_name='air_emissivity',
units_name='none')
#--------------------------------------
# Open new files to write time series
#--------------------------------------
IDs = self.outlet_IDs
if (self.SAVE_EA_PIXELS):
model_output.open_new_ts_file( self, self.ea_ts_file, IDs,
## var_name='e_air',
var_name='ea',
long_name='vapor_pressure_in_air',
units_name='mbar')
if (self.SAVE_ES_PIXELS):
model_output.open_new_ts_file( self, self.es_ts_file, IDs,
## var_name='e_surf',
var_name='es',
long_name='vapor_pressure_at_surface',
units_name='mbar')
if (self.SAVE_QSW_PIXELS):
model_output.open_new_ts_file( self, self.Qsw_ts_file, IDs,
var_name='Qsw',
long_name='net_shortwave_radiation',
units_name='W/m^2')
if (self.SAVE_QLW_PIXELS):
model_output.open_new_ts_file( self, self.Qlw_ts_file, IDs,
var_name='Qlw',
long_name='net_longwave_radiation',
units_name='W/m^2')
if (self.SAVE_EMA_PIXELS):
model_output.open_new_ts_file( self, self.ema_ts_file, IDs,
var_name='ema',
long_name='air_emissivity',
units_name='none')
# open_output_files()
#-------------------------------------------------------------------
def write_output_files(self, time_seconds=None):
if (self.DEBUG):
print 'Calling write_output_files()...'
#-----------------------------------------
# Allows time to be passed from a caller
#-----------------------------------------
if (time_seconds is None):
time_seconds = self.time_sec
model_time = int(time_seconds)
#----------------------------------------
# Save computed values at sampled times
#----------------------------------------
if (model_time % int(self.save_grid_dt) == 0):
self.save_grids()
if (model_time % int(self.save_pixels_dt) == 0):
self.save_pixel_values()
# write_output_files()
#-------------------------------------------------------------------
def close_output_files(self):
if (self.SAVE_EA_GRIDS): model_output.close_gs_file( self, 'ea')
if (self.SAVE_ES_GRIDS): model_output.close_gs_file( self, 'es')
if (self.SAVE_QSW_GRIDS): model_output.close_gs_file( self, 'Qsw')
if (self.SAVE_QLW_GRIDS): model_output.close_gs_file( self, 'Qlw')
if (self.SAVE_EMA_GRIDS): model_output.close_gs_file( self, 'ema')
#-------------------------------------------------------------------
if (self.SAVE_EA_PIXELS): model_output.close_ts_file( self, 'ea')
if (self.SAVE_ES_PIXELS): model_output.close_ts_file( self, 'es')
if (self.SAVE_QSW_PIXELS): model_output.close_ts_file( self, 'Qsw')
if (self.SAVE_QLW_PIXELS): model_output.close_ts_file( self, 'Qlw')
if (self.SAVE_EMA_PIXELS): model_output.close_ts_file( self, 'ema')
# close_output_files()
#-------------------------------------------------------------------
def save_grids(self):
if (self.SAVE_EA_GRIDS):
model_output.add_grid( self, self.e_air, 'ea', self.time_min )
if (self.SAVE_ES_GRIDS):
model_output.add_grid( self, self.e_surf, 'es', self.time_min )
if (self.SAVE_QSW_GRIDS):
model_output.add_grid( self, self.Qn_SW, 'Qsw', self.time_min )
if (self.SAVE_QLW_GRIDS):
model_output.add_grid( self, self.Qn_LW, 'Qlw', self.time_min )
if (self.SAVE_EMA_GRIDS):
model_output.add_grid( self, self.em_air, 'ema', self.time_min )
# save_grids()
#-------------------------------------------------------------------
def save_pixel_values(self):
IDs = self.outlet_IDs
time = self.time_min ######
if (self.SAVE_EA_PIXELS):
model_output.add_values_at_IDs( self, time, self.e_air, 'ea', IDs )
if (self.SAVE_ES_PIXELS):
model_output.add_values_at_IDs( self, time, self.e_surf, 'es', IDs )
if (self.SAVE_QSW_PIXELS):
model_output.add_values_at_IDs( self, time, self.Qn_SW, 'Qsw', IDs )
if (self.SAVE_QLW_PIXELS):
model_output.add_values_at_IDs( self, time, self.Qn_LW, 'Qlw', IDs )
if (self.SAVE_EMA_PIXELS):
model_output.add_values_at_IDs( self, time, self.em_air, 'ema', IDs )
# save_pixel_values()
#-------------------------------------------------------------------
#---------------------------------------------------------------------------------
def compare_em_air_methods():
#--------------------------------------------------------------
# Notes: There are two different methods that are commonly
# used to compute the vapor pressure of air, e_air,
# and then the emissivity of air, em_air, for use in
# longwave radiation calculations. This routine
# compares them graphically.
#
# NB! This hasn't been tested since conversion from IDL.
#-------------------------------------------------------------
import matplotlib.pyplot
T_air = np.arange(80, dtype='Float32') - np.float64(40) #[Celsius] (-40 to 40)
RH = np.float64(1.0)
C2K = np.float64(273.15)
#--------------------------
# Brutsaert (1975) method
#--------------------------
term1 = (np.float64(17.3) * T_air) / (T_air + np.float64(237.3)) ######### DOUBLE CHECK THIS (7/26/13)
e_air1 = RH * np.float64(0.611) * np.exp( term1 ) # [kPa]
em_air1 = np.float64(1.72) * (e_air1 / (T_air + C2K)) ** (np.float64(1) / 7)
#---------------------------
# Satterlund (1979) method
#----------------------------
# NB! e_air has units of Pa
#----------------------------
term2 = np.float64(2353) / (T_air + C2K)
e_air2 = RH * np.float64(10) ** (np.float64(11.40) - term2) # [Pa]
eterm = np.exp(-np.float64(1) * (e_air2 / np.float64(100)) ** ((T_air + C2K) / np.float64(2016)))
em_air2 = np.float64(1.08) * (np.float64(1) - eterm)
#----------------------------
# Plot the two e_air curves
#--------------------------------
# These two agree quite closely
#--------------------------------
    matplotlib.pyplot.figure(figsize=(8, 6), dpi=80)
    matplotlib.pyplot.plot(T_air, e_air1)
    matplotlib.pyplot.show()
## oplot(T_air, (e_air2 / np.float64(1000)), psym=-3) # [Pa -> kPa]
#-----------------------------
# Plot the two em_air curves
#--------------------------------------------------
# These two don't agree very well for some reason
#--------------------------------------------------
    matplotlib.pyplot.figure(figsize=(8, 6), dpi=80)
    matplotlib.pyplot.plot(T_air, em_air1)
    matplotlib.pyplot.show()
## oplot(T_air, em_air2, psym=-3)
# compare_em_air_Methods
#---------------------------------------------------------------------------------
| mit |
liebermeister/flux-enzyme-cost-minimization | scripts/monod_curve.py | 1 | 6430 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 1 2015
@author: noore
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.optimize import curve_fit
import definitions as D
import pandas as pd
#LOW_GLUCOSE = D.LOW_CONC['glucoseExt']
LOW_GLUCOSE = 1e-3 # in mM, i.e. 1 uM
MAX_GROWTH_RATE_L = 'max growth rate [h$^{-1}$]'
GROWTH_RATE_LOW_GLU = 'growth rate at\n%g $\mu$M glucose [h$^{-1}$]' % (1e3*LOW_GLUCOSE)
MONOD_COEFF_L = 'Monod coefficient [mM glucose]'
INV_MONOD_COEFF_L = 'inverse of Monod coeff.\n[mM$^{-1}$]'
MAX_GR_OVER_KM_L = 'max. growth rate / $K_{Monod}$ \n[h$^{-1}$ mM$^{-1}$]'
HILL_COEFF_L = 'Hill coefficient'
MONOD_FUNC = lambda x, gr_max, K_M, h: gr_max / (1 + (K_M/x)**h)
p0 = (0.07, 1.0, 1.0)
def calculate_monod_parameters(figure_data):
aerobic_data_df = figure_data['standard']
aerobic_sweep_data_df = figure_data['monod_glucose_aero']
anaerobic_data_df = figure_data['anaerobic'].drop(9999)
anaerobic_sweep_data_df = figure_data['monod_glucose_anae'].drop(9999)
aerobic_sweep_data_df = aerobic_sweep_data_df.transpose().fillna(0)
anaerobic_sweep_data_df = anaerobic_sweep_data_df.transpose().fillna(0)
plot_data = [('aerobic conditions', aerobic_sweep_data_df, aerobic_data_df),
('anaerobic conditions', anaerobic_sweep_data_df, anaerobic_data_df)]
monod_dfs = []
for title, sweep_df, data_df in plot_data:
monod_df = pd.DataFrame(index=sweep_df.columns,
columns=[MAX_GROWTH_RATE_L, MONOD_COEFF_L, HILL_COEFF_L],
dtype=float)
for efm in monod_df.index:
try:
popt, _ = curve_fit(MONOD_FUNC, sweep_df.index, sweep_df[efm],
p0=p0, method='trf')
monod_df.loc[efm, :] = popt
except RuntimeError:
print("cannot resolve Monod curve for EFM %d" % efm)
monod_df.loc[efm, :] = np.nan
# get fig3 data for plotting the other features
monod_df = monod_df.join(data_df)
monod_df[INV_MONOD_COEFF_L] = 1.0/monod_df[MONOD_COEFF_L]
monod_df[MAX_GR_OVER_KM_L] = monod_df[MAX_GROWTH_RATE_L] * monod_df[INV_MONOD_COEFF_L]
# calculate the value of the growth rate using the Monod curve
# for LOW_GLUCOSE
monod_df[GROWTH_RATE_LOW_GLU] = 0
for j in monod_df.index:
monod_df.loc[j, GROWTH_RATE_LOW_GLU] = MONOD_FUNC(LOW_GLUCOSE,
monod_df.at[j, MAX_GROWTH_RATE_L],
monod_df.at[j, MONOD_COEFF_L],
monod_df.at[j, HILL_COEFF_L])
monod_dfs.append((title, monod_df))
return monod_dfs
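# Illustrative use of the Monod fit above (a sketch, not executed as part of
# this script): the same curve_fit call should recover known parameters from
# synthetic data, e.g.
#
#     x = np.logspace(-3, 1, 50)                    # glucose [mM]
#     y = MONOD_FUNC(x, 0.07, 1.0, 1.0)             # gr_max, K_M, h
#     popt, _ = curve_fit(MONOD_FUNC, x, y, p0=p0, method='trf')
#     # popt is approximately (0.07, 1.0, 1.0)
#
# The numbers here are made up for illustration; real sweeps come from
# figure_data via calculate_monod_parameters().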
def plot_monod_scatter(monod_dfs, y_var=MAX_GROWTH_RATE_L):
fig = plt.figure(figsize=(15, 14))
gs1 = gridspec.GridSpec(2, 4, left=0.05, right=0.95, bottom=0.55, top=0.97)
gs2 = gridspec.GridSpec(2, 4, left=0.05, right=0.95, bottom=0.06, top=0.45)
axs = []
for i in range(2):
for j in range(4):
axs.append(plt.subplot(gs1[i, j]))
for i in range(2):
for j in range(4):
axs.append(plt.subplot(gs2[i, j]))
for i, ax in enumerate(axs):
ax.annotate(chr(ord('a')+i), xy=(0.04, 0.95),
xycoords='axes fraction', ha='left', va='top',
size=20)
for i, (title, monod_df) in enumerate(monod_dfs):
xaxis_data = [(INV_MONOD_COEFF_L, (1, 2500), 'log'),
(GROWTH_RATE_LOW_GLU, (0.001, 0.2), 'linear')]
for j, (x_var, xlim, xscale) in enumerate(xaxis_data):
ax_row = axs[4*i + 8*j : 4*i + 8*j + 4]
ax = ax_row[0]
x = monod_df[x_var]
y = monod_df[y_var]
CS = ax.scatter(x, y, s=12, marker='o',
facecolors=(0.85, 0.85, 0.85),
linewidth=0)
for efm, (col, lab) in D.efm_dict.items():
if efm in x.index:
ax.plot(x[efm], y[efm], markersize=5, marker='o',
color=col, label=None)
ax.annotate(lab, xy=(x[efm], y[efm]),
xytext=(0, 5), textcoords='offset points',
ha='center', va='bottom', color=col)
ax.set_xlim(xlim[0], xlim[1])
ax.set_xscale(xscale)
ax.set_title('%s' % title, fontsize=16)
ax.set_xlabel(x_var, fontsize=16)
ax.set_ylabel(y_var, fontsize=16)
plot_parameters = [
{'c': D.OXYGEN_L, 'title': 'oxygen uptake' ,
'ax': ax_row[1], 'vmin': 0, 'vmax': 0.8},
{'c': D.YIELD_L, 'title': 'yield' ,
'ax': ax_row[2], 'vmin': 0, 'vmax': 30},
{'c': D.ACE_L, 'title': 'acetate secretion',
'ax': ax_row[3], 'vmin': 0, 'vmax': 0.6}
]
for d in plot_parameters:
x = monod_df[x_var]
y = monod_df[y_var]
c = monod_df[d['c']]
CS = d['ax'].scatter(x, y, s=12, c=c, marker='o',
linewidth=0, cmap='copper_r',
vmin=d['vmin'], vmax=d['vmax'])
cbar = plt.colorbar(CS, ax=d['ax'])
cbar.set_label(d['c'], fontsize=12)
d['ax'].set_title(d['title'], fontsize=16)
if i % 2 == 1:
d['ax'].set_xlabel(x_var, fontsize=16)
d['ax'].set_xlim(xlim[0], xlim[1])
d['ax'].set_xscale(xscale)
for i in range(16):
axs[i].set_yscale('linear')
axs[i].set_ylim(0, 0.85)
if i % 8 == 0:
axs[i].get_xaxis().set_visible(False)
if i % 4 > 0:
axs[i].get_yaxis().set_visible(False)
return fig
if __name__ == '__main__':
figure_data = D.get_figure_data()
monod_dfs = calculate_monod_parameters(figure_data)
figS17 = plot_monod_scatter(monod_dfs)
D.savefig(figS17, 'S17')
#%%
from pandas import ExcelWriter
writer = ExcelWriter(os.path.join(D.OUTPUT_DIR, 'monod_params.xls'))
for title, monod_df in monod_dfs:
monod_df.to_excel(writer, title)
writer.save() | gpl-2.0 |
aminert/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
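# Note: a quick, illustrative sanity check of _logistic_loss_and_grad (not
# part of the public API): at w = 0 with no intercept, unit sample weights
# and alpha = 0, log_logistic(0) = -log(2), so the loss is
# n_samples * log(2) and the gradient is -0.5 * X.T.dot(y). For example,
#
#     X = np.array([[1., 0.], [0., 1.]])
#     y = np.array([1., -1.])
#     _logistic_loss_and_grad(np.zeros(2), X, y, alpha=0.)
#     # -> (2 * log(2), array([-0.5, 0.5]))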
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
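# Illustrative sketch (not part of the original module): the Hessian-vector
# product Hs returned above can be checked against a finite difference of the
# gradient, since H.dot(s) ~ (grad(w + eps * s) - grad(w)) / eps for small
# eps. All demo names below are hypothetical.
#
#     >>> rng = np.random.RandomState(0)
#     >>> X_demo, w_demo, s_demo = rng.randn(30, 4), rng.randn(4), rng.randn(4)
#     >>> y_demo = np.sign(rng.randn(30))
#     >>> grad, Hs = _logistic_grad_hess(w_demo, X_demo, y_demo, 1.)
#     >>> eps = 1e-6
#     >>> grad_eps = _logistic_loss_and_grad(w_demo + eps * s_demo,
#     ...                                    X_demo, y_demo, 1.)[1]
#     >>> bool(np.allclose(Hs(s_demo), (grad_eps - grad) / eps, atol=1e-3))
#     True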
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
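# Note added for clarity (not in the original source): with sample weights
# s_i, softmax probabilities p_ik and binarized labels Y_ik, the quantity
# returned above is
#     loss = -sum_i s_i * sum_k Y_ik * log(p_ik) + 0.5 * alpha * ||W||_F^2
# where ||W||_F is the Frobenius norm of the coefficient matrix (intercepts,
# when fitted, are not penalized).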
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with a OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
        # To take care of object dtypes, i.e. 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
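# Illustrative usage of logistic_regression_path (hypothetical data; not part
# of the original module):
#
#     >>> from sklearn.datasets import make_classification
#     >>> X_demo, y_demo = make_classification(n_samples=50, n_features=4,
#     ...                                      random_state=0)
#     >>> coefs, Cs = logistic_regression_path(X_demo, y_demo, Cs=3)
#     >>> len(coefs), Cs.shape[0]   # one coefficient vector per value of C
#     (3, 3)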
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
See also
--------
sklearn.linear_model.SGDClassifier
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
# For the binary case, this get squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
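# Illustrative usage of LogisticRegression (hypothetical data; not part of the
# original module):
#
#     >>> from sklearn.datasets import load_iris
#     >>> iris = load_iris()
#     >>> clf = LogisticRegression(C=1e5).fit(iris.data, iris.target)
#     >>> clf.coef_.shape, clf.predict_proba(iris.data[:2]).shape
#     ((3, 4), (2, 3))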
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths in now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
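# Illustrative usage of LogisticRegressionCV (hypothetical data; not part of
# the original module):
#
#     >>> from sklearn.datasets import make_classification
#     >>> X_demo, y_demo = make_classification(n_samples=100, random_state=0)
#     >>> clf = LogisticRegressionCV(Cs=5, cv=3).fit(X_demo, y_demo)
#     >>> clf.Cs_.shape, clf.C_.shape   # 5 candidate Cs, one best C (binary)
#     ((5,), (1,))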
| bsd-3-clause |
Vimos/scikit-learn | sklearn/kernel_approximation.py | 7 | 18505 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
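# Illustrative usage of RBFSampler (hypothetical data; not part of the
# original module), pairing the approximate feature map with a linear model:
#
#     >>> from sklearn.linear_model import SGDClassifier
#     >>> X_demo = [[0, 0], [1, 1], [1, 0], [0, 1]]
#     >>> y_demo = [0, 0, 1, 1]
#     >>> rbf_feature = RBFSampler(gamma=1, random_state=1)
#     >>> X_features = rbf_feature.fit_transform(X_demo)
#     >>> clf = SGDClassifier(random_state=0).fit(X_features, y_demo)
#     >>> clf.score(X_features, y_demo)
#     1.0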
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than"
" -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
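# Illustrative usage of AdditiveChi2Sampler (hypothetical data; not part of
# the original module): each input feature is expanded into
# 2 * sample_steps - 1 output features.
#
#     >>> X_demo = np.arange(12, dtype=float).reshape(3, 4)
#     >>> AdditiveChi2Sampler(sample_steps=2).fit_transform(X_demo).shape
#     (3, 12)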
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
        Gamma parameter for the RBF, laplacian, polynomial, exponential, chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
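# Illustrative usage of Nystroem (hypothetical data; not part of the original
# module), combining the approximate kernel map with a linear SVM:
#
#     >>> from sklearn.svm import LinearSVC
#     >>> rng = np.random.RandomState(0)
#     >>> X_demo = rng.randn(60, 5)
#     >>> y_demo = (X_demo[:, 0] > 0).astype(int)
#     >>> nys = Nystroem(kernel='rbf', gamma=.2, n_components=20,
#     ...                random_state=0)
#     >>> X_mapped = nys.fit_transform(X_demo)
#     >>> X_mapped.shape
#     (60, 20)
#     >>> clf = LinearSVC(random_state=0).fit(X_mapped, y_demo)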
| bsd-3-clause |
pysb/pysb | pysb/examples/run_earm_hpp.py | 5 | 2377 | """ Run the Extrinsic Apoptosis Reaction Model (EARM) using BioNetGen's
Hybrid-Particle Population (HPP) algorithm.
NFsim provides stochastic simulation without reaction network generation,
allowing simulation of models with large (or infinite) reaction networks by
keeping track of species counts. However, it can fail when the number of
instances of a species gets too large (typically >200000). HPP circumvents
this problem by allowing the user to define species with large instance
counts as populations rather than NFsim particles.
This example runs the EARM 1.0 model with HPP, which fails to run on NFsim
with the default settings due to the large initial counts of
several species. By assigning population maps to these species, we can run
the simulation.
Reference: Hogg et al., PLoS Comput Biol 2014
https://doi.org/10.1371/journal.pcbi.1003544
"""
from pysb.examples.earm_1_0 import model
from pysb.simulator import BngSimulator
from pysb.simulator.bng import PopulationMap
from pysb import Parameter
import matplotlib.pyplot as plt
import numpy as np
def plot_mean_min_max(name, title=None):
x = np.array([tr[:][name] for tr in trajectories]).T
if not title:
title = name
plt.figure(title)
plt.plot(tout.T, x, '0.5', lw=2, alpha=0.25) # individual trajectories
plt.plot(tout[0], x.mean(1), 'k--', lw=3, label="Mean")
plt.plot(tout[0], x.min(1), 'b--', lw=3, label="Minimum")
plt.plot(tout[0], x.max(1), 'r--', lw=3, label="Maximum")
plt.legend(loc=0)
plt.xlabel('Time')
plt.ylabel('Population of %s' % name)
PARP, CPARP, Mito, mCytoC = [model.monomers[x] for x in
['PARP', 'CPARP', 'Mito', 'mCytoC']]
klump = Parameter('klump', 10000, _export=False)
model.add_component(klump)
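# Each PopulationMap below tells HPP to treat species matching the given
# pattern as a lumped population variable, using ``klump`` as the lumping rate
# constant, rather than as individual NFsim particles; this keeps the particle
# count manageable for these highly abundant species.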
population_maps = [
PopulationMap(PARP(b=None), klump),
PopulationMap(CPARP(b=None), klump),
PopulationMap(Mito(b=None), klump),
PopulationMap(mCytoC(b=None), klump)
]
sim = BngSimulator(model, tspan=np.linspace(0, 20000, 101))
simres = sim.run(n_runs=20, method='nf', population_maps=population_maps)
trajectories = simres.all
tout = simres.tout
plot_mean_min_max('Bid_unbound')
plot_mean_min_max('PARP_unbound')
plot_mean_min_max('mSmac_unbound')
plot_mean_min_max('tBid_total')
plot_mean_min_max('CPARP_total')
plot_mean_min_max('cSmac_total')
plt.show()
| bsd-2-clause |
MDAnalysis/mdanalysis | package/MDAnalysis/analysis/encore/dimensionality_reduction/reduce_dimensionality.py | 1 | 9928 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
dimensionality reduction frontend --- :mod:`MDAnalysis.analysis.encore.dimensionality_reduction.reduce_dimensionality`
======================================================================================================================
The module defines a function serving as front-end for various dimensionality
reduction algorithms, wrapping them to allow them to be used interchangeably.
:Author: Matteo Tiberti, Wouter Boomsma, Tone Bengtsen
.. versionadded:: 0.16.0
"""
import numpy as np
from ..confdistmatrix import get_distance_matrix
from ..utils import ParallelCalculation, merge_universes
from ..dimensionality_reduction.DimensionalityReductionMethod import (
StochasticProximityEmbeddingNative)
def reduce_dimensionality(ensembles,
method=StochasticProximityEmbeddingNative(),
select="name CA",
distance_matrix=None,
allow_collapsed_result=True,
ncores=1,
**kwargs):
"""
Reduce dimensions in frames from one or more ensembles, using one or more
dimensionality reduction methods. The function optionally takes
    pre-calculated distance matrices as an argument. Note that not all
    dimensionality reduction procedures can work directly on distance matrices,
so the distance matrices might be ignored for particular choices of
method.
Parameters
----------
ensembles : MDAnalysis.Universe, or list or list of list thereof
The function takes either a single Universe object, a list of Universe
objects or a list of lists of Universe objects. If given a single
universe, it simply works on the conformations in the trajectory. If
given a list of ensembles, it will merge them and analyse them together,
        keeping track of the ensemble to which each of the conformations belongs.
        Finally, if passed a list of lists of ensembles, the function will repeat
        the procedure just described once for each inner list, merging the
        ensembles it contains.
method : MDAnalysis.analysis.encore.dimensionality_reduction.DimensionalityReductionMethod or list
A single or a list of instances of the DimensionalityReductionMethod
classes from the dimensionality_reduction module. A separate analysis
will be run for each method. Note that different parameters for the
same method can be explored by adding different instances of
the same dimensionality reduction class. Options are Stochastic
Proximity Embedding or Principal Component Analysis.
select : str, optional
Atom selection string in the MDAnalysis format (default is "name CA")
distance_matrix : encore.utils.TriangularMatrix, optional
Distance matrix for stochastic proximity embedding. If this parameter
        is not supplied, an RMSD distance matrix will be calculated on the fly (default).
If several distance matrices are supplied, an analysis will be done
for each of them. The number of provided distance matrices should
match the number of provided ensembles.
allow_collapsed_result: bool, optional
        Whether a return value consisting of a single-element list should be
        collapsed into just that element (default = True).
ncores : int, optional
Maximum number of cores to be used (default is 1).
Returns
-------
list of coordinate arrays in the reduced dimensions (or potentially a single
coordinate array object if allow_collapsed_result is set to True)
Example
-------
    Two ensembles are created as Universe objects using a topology file and
    two trajectories. The topology and trajectory files used are obtained
from the MDAnalysis test suite for two different simulations of the protein
AdK.
Here, we reduce two ensembles to two dimensions, and plot the result using
matplotlib: ::
>>> from MDAnalysis import Universe
>>> import MDAnalysis.analysis.encore as encore
>>> from MDAnalysis.tests.datafiles import PSF, DCD, DCD2
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> coordinates, details = encore.reduce_dimensionality([ens1,ens2])
    >>> import matplotlib.pyplot as plt
    >>> plt.scatter(coordinates[0], coordinates[1],
    ...             color=[["red", "blue"][m-1] for m
    ...                    in details["ensemble_membership"]])
Note how we extracted information about which conformation belonged to
which ensemble from the details variable.
You can change the parameters of the dimensionality reduction method
by explicitly specifying the method ::
    >>> coordinates, details = encore.reduce_dimensionality(
    ...     [ens1, ens2],
    ...     method=encore.StochasticProximityEmbeddingNative(dimension=3))
Here is an illustration using Principal Component Analysis, instead
of the default dimensionality reduction method ::
    >>> coordinates, details = encore.reduce_dimensionality(
    ...     [ens1, ens2],
    ...     method=encore.PrincipalComponentAnalysis(dimension=2))
You can also combine multiple methods in one call ::
    >>> coordinates, details = encore.reduce_dimensionality(
    ...     [ens1, ens2],
    ...     method=[encore.PrincipalComponentAnalysis(dimension=2),
    ...             encore.StochasticProximityEmbeddingNative(dimension=2)])
"""
if ensembles is not None:
if not hasattr(ensembles, '__iter__'):
ensembles = [ensembles]
ensembles_list = ensembles
if not hasattr(ensembles[0], '__iter__'):
ensembles_list = [ensembles]
# Calculate merged ensembles and transfer to memory
merged_ensembles = []
for ensembles in ensembles_list:
# Transfer ensembles to memory
for ensemble in ensembles:
ensemble.transfer_to_memory()
merged_ensembles.append(merge_universes(ensembles))
methods = method
if not hasattr(method, '__iter__'):
methods = [method]
# Check whether any of the methods can make use of a distance matrix
any_method_accept_distance_matrix = \
np.any([_method.accepts_distance_matrix for _method in
methods])
# If distance matrices are provided, check that it matches the number
# of ensembles
if distance_matrix:
if not hasattr(distance_matrix, '__iter__'):
distance_matrix = [distance_matrix]
if ensembles is not None and \
len(distance_matrix) != len(merged_ensembles):
raise ValueError("Dimensions of provided list of distance matrices "
"does not match that of provided list of "
"ensembles: {0} vs {1}"
.format(len(distance_matrix),
len(merged_ensembles)))
else:
# Calculate distance matrices for all merged ensembles - if not provided
if any_method_accept_distance_matrix:
distance_matrix = []
for merged_ensemble in merged_ensembles:
distance_matrix.append(get_distance_matrix(merged_ensemble,
select=select,
**kwargs))
args = []
for method in methods:
if method.accepts_distance_matrix:
args += [(d,) for d in distance_matrix]
else:
for merged_ensemble in merged_ensembles:
coordinates = merged_ensemble.trajectory.timeseries(order="fac")
# Flatten coordinate matrix into n_frame x n_coordinates
coordinates = np.reshape(coordinates,
(coordinates.shape[0], -1))
args.append((coordinates,))
# Execute dimensionality reduction procedure
pc = ParallelCalculation(ncores, methods, args)
# Run parallel calculation
results = pc.run()
# Keep track of which sample belongs to which ensembles
details = {}
if ensembles is not None:
ensemble_assignment = []
for i, ensemble in enumerate(ensembles):
ensemble_assignment += [i+1]*len(ensemble.trajectory)
ensemble_assignment = np.array(ensemble_assignment)
details['ensemble_membership'] = ensemble_assignment
coordinates = []
for result in results:
coordinates.append(result[1][0])
# details.append(result[1][1])
if allow_collapsed_result and len(coordinates)==1:
coordinates = coordinates[0]
# details = details[0]
return coordinates, details
| gpl-2.0 |
shiquanwang/pylearn2 | pylearn2/cross_validation/tests/test_train_cv_extensions.py | 49 | 1681 | """
Tests for TrainCV extensions.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_monitor_based_save_best_cv():
"""Test MonitorBasedSaveBestCV."""
handle, filename = tempfile.mkstemp()
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml_monitor_based_save_best_cv %
{'save_path': filename})
trainer.main_loop()
# clean up
os.remove(filename)
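# The YAML below wires up a small end-to-end run: DatasetKFold splits a random
# one-hot design matrix into folds, a tiny autoencoder (10 visible, 8 hidden
# units) is trained with BGD for a single epoch on each fold, and the
# MonitorBasedSaveBestCV extension saves the best model tracked on the
# train_objective channel to %(save_path)s.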
test_yaml_monitor_based_save_best_cv = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: sigmoid,
act_dec: linear
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: exhaustive,
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
cv_extensions: [
!obj:pylearn2.cross_validation.train_cv_extensions.MonitorBasedSaveBestCV {
channel_name: train_objective,
save_path: %(save_path)s,
},
],
}
"""
| bsd-3-clause |
cactusbin/nyt | matplotlib/examples/user_interfaces/embedding_in_tk.py | 9 | 1419 | #!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
root = Tk.Tk()
root.wm_title("Embedding in TK")
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
a.plot(t,s)
# a tk.DrawingArea
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def on_key_event(event):
print('you pressed %s'%event.key)
key_press_handler(event, canvas, toolbar)
canvas.mpl_connect('key_press_event', on_key_event)
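# key_press_handler implements matplotlib's default key bindings (e.g. 's' to
# save the figure, 'g' to toggle the grid, 'k'/'l' to toggle log scaling), so
# the embedded canvas responds to the same shortcuts as a standalone window.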
def _quit():
root.quit() # stops mainloop
root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
button = Tk.Button(master=root, text='Quit', command=_quit)
button.pack(side=Tk.BOTTOM)
Tk.mainloop()
# If you put root.destroy() here, it will cause an error if
# the window is closed with the window manager.
| unlicense |
justincassidy/scikit-learn | sklearn/ensemble/tests/test_forest.py | 57 | 35265 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
    # make sure that it is linearly separable
    # even after being projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
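    # Reasoning sketch: the extra tree draws its root cut point uniformly on
    # [0, 3], so it lands in (0, 1), (1, 2) or (2, 3) with probability 1/3
    # each. A cut in (1, 2) leaves {0, 1} and {2, 3}, each of which can only be
    # split one way, giving the compact tree with probability 1/3. A cut in
    # (0, 1) or (2, 3) isolates a single value, and the remaining three-value
    # side is then cut in one of two intervals with probability 1/2 each,
    # giving the four other shapes with probability 1/3 * 1/2 = 1/6 each.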
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
ianctse/pvlib-python | pvlib/test/test_modelchain.py | 1 | 8186 | import numpy as np
import pandas as pd
from numpy import nan
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pandas.util.testing import assert_series_equal, assert_frame_equal
from nose.tools import with_setup, raises
# should store this test data locally, but for now...
sam_data = {}
def retrieve_sam_network():
sam_data['cecmod'] = pvsystem.retrieve_sam('cecmod')
sam_data['sandiamod'] = pvsystem.retrieve_sam('sandiamod')
sam_data['cecinverter'] = pvsystem.retrieve_sam('cecinverter')
def mc_setup():
# limit network usage
try:
modules = sam_data['sandiamod']
except KeyError:
retrieve_sam_network()
modules = sam_data['sandiamod']
module = modules.Canadian_Solar_CS5P_220M___2009_.copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(module_parameters=module,
inverter_parameters=inverter)
location = Location(32.2, -111, altitude=700)
return system, location
def test_ModelChain_creation():
system, location = mc_setup()
mc = ModelChain(system, location)
def test_orientation_strategy():
strategies = {None: (0, 180), 'None': (0, 180),
'south_at_latitude_tilt': (32.2, 180),
'flat': (0, 180)}
for strategy, expected in strategies.items():
yield run_orientation_strategy, strategy, expected
def run_orientation_strategy(strategy, expected):
system = PVSystem()
location = Location(32.2, -111, altitude=700)
mc = ModelChain(system, location, orientation_strategy=strategy)
    # the "or" accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy == None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
def test_run_model():
system, location = mc_setup()
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 1.82033564e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_with_irradiance():
system, location = mc_setup()
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, irradiance=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_with_weather():
system, location = mc_setup()
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'wind_speed':5, 'temp_air':10}, index=times)
ac = mc.run_model(times, weather=weather).ac
expected = pd.Series(np.array([ 1.99952400e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_tracker():
system, location = mc_setup()
system = SingleAxisTracker(module_parameters=system.module_parameters,
inverter_parameters=system.inverter_parameters)
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 121.421719, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
expected = pd.DataFrame(np.
array([[ 54.82513187, 90. , 11.0039221 , 11.0039221 ],
[ nan, 0. , 0. , nan]]),
columns=['aoi', 'surface_azimuth', 'surface_tilt', 'tracker_theta'],
index=times)
assert_frame_equal(mc.tracking, expected)
@raises(ValueError)
def test_bad_get_orientation():
modelchain.get_orientation('bad value')
@raises(ValueError)
def test_basic_chain_required():
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32
longitude = -111
altitude = 700
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
altitude=altitude)
def test_basic_chain_alt_az():
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth)
expected = pd.Series(np.array([ 1.14490928477e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_basic_chain_strategy():
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
orientation_strategy='south_at_latitude_tilt',
altitude=altitude)
expected = pd.Series(np.array([ 1.82033563543e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_basic_chain_altitude_pressure():
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
pressure=93194)
expected = pd.Series(np.array([ 1.15771428788e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
altitude=altitude)
expected = pd.Series(np.array([ 1.15771428788e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
| bsd-3-clause |
cwu2011/scikit-learn | sklearn/preprocessing/__init__.py | 14 | 1184 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'label_binarize',
]
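# A brief usage sketch (illustration only) of two transformers re-exported
# above, using the standard fit/transform API:
#
#     import numpy as np
#     from sklearn.preprocessing import StandardScaler, MinMaxScaler
#     X = np.array([[1.0, -2.0], [2.0, 0.0], [3.0, 2.0]])
#     StandardScaler().fit_transform(X)  # per-column zero mean, unit variance
#     MinMaxScaler().fit_transform(X)    # per-column rescaling to [0, 1]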
| bsd-3-clause |
ElDeveloper/scikit-learn | sklearn/tree/tests/test_tree.py | 13 | 52365 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
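    # labels follow an XOR-like quadrant pattern: class 1 on the two diagonal
    # quadrants, class 0 elsewhere, so no single axis-aligned split separates them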
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
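    # an unbounded regression tree on 10000 distinct targets grows roughly one
    # leaf per sample, which exercises the internal node-array resize logic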
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
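        # min_samples_split=0.2 corresponds to ceil(0.2 * 150) = 30 samples on
        # iris, so every split node must still hold well over 9 samples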
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error raised for too-large inputs mentions float32.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features so that the same (data, indices,
    # indptr) arrays can be used to build both a csc and a csr matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
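    # stored (explicit) zeros and implicit zeros must be handled identically by
    # the sparse splitter, hence the check that some explicit zeros are present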
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that leaf indices are correct
    leaves = est.apply(X)
    leaf_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
    assert_array_almost_equal(leaf_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
    all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
        yield (check_no_sparse_y_support, name)
| bsd-3-clause |
fmfn/UnbalancedDataset | imblearn/ensemble/_weight_boosting.py | 2 | 11479 | from copy import deepcopy
import numpy as np
from sklearn.base import clone
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble._base import _set_random_states
from sklearn.utils import _safe_indexing
from ..under_sampling.base import BaseUnderSampler
from ..under_sampling import RandomUnderSampler
from ..pipeline import make_pipeline
from ..utils import Substitution, check_target_type
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
random_state=_random_state_docstring,
)
class RUSBoostClassifier(AdaBoostClassifier):
"""Random under-sampling integrated in the learning of AdaBoost.
    During learning, the problem of class imbalance is alleviated by randomly
    under-sampling the training set at each iteration of the boosting
    algorithm.
Read more in the :ref:`User Guide <boosting>`.
.. versionadded:: 0.4
Parameters
----------
base_estimator : estimator object, default=None
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
``classes_`` and ``n_classes_`` attributes. If ``None``, then
the base estimator is ``DecisionTreeClassifier(max_depth=1)``.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, default=1.0
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {{'SAMME', 'SAMME.R'}}, default='SAMME.R'
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
{sampling_strategy}
replacement : bool, default=False
        Whether or not to sample randomly with replacement.
{random_state}
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of classifiers
The collection of fitted sub-estimators.
samplers_ : list of RandomUnderSampler
The collection of fitted samplers.
pipelines_ : list of Pipeline
The collection of fitted pipelines (samplers + trees).
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : ndarray of shape (n_estimator,)
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of shape (n_estimator,)
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : ndarray of shape (n_features,)
The feature importances if supported by the ``base_estimator``.
See Also
--------
BalancedBaggingClassifier : Bagging classifier for which each base
estimator is trained on a balanced bootstrap.
    BalancedRandomForestClassifier : Random forest applying random
        under-sampling to balance the different bootstraps.
EasyEnsembleClassifier : Ensemble of AdaBoost classifier trained on
balanced bootstraps.
References
----------
.. [1] Seiffert, C., Khoshgoftaar, T. M., Van Hulse, J., & Napolitano, A.
"RUSBoost: A hybrid approach to alleviating class imbalance." IEEE
Transactions on Systems, Man, and Cybernetics-Part A: Systems and Humans
40.1 (2010): 185-197.
Examples
--------
>>> from imblearn.ensemble import RUSBoostClassifier
>>> from sklearn.datasets import make_classification
>>>
>>> X, y = make_classification(n_samples=1000, n_classes=3,
... n_informative=4, weights=[0.2, 0.3, 0.5],
... random_state=0)
>>> clf = RUSBoostClassifier(random_state=0)
>>> clf.fit(X, y) # doctest: +ELLIPSIS
RUSBoostClassifier(...)
>>> clf.predict(X) # doctest: +ELLIPSIS
array([...])
"""
@_deprecate_positional_args
def __init__(
self,
base_estimator=None,
*,
n_estimators=50,
learning_rate=1.0,
algorithm="SAMME.R",
sampling_strategy="auto",
replacement=False,
random_state=None,
):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
algorithm=algorithm,
random_state=random_state,
)
self.sampling_strategy = sampling_strategy
self.replacement = replacement
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
check_target_type(y)
self.samplers_ = []
self.pipelines_ = []
super().fit(X, y, sample_weight)
return self
def _validate_estimator(self):
"""Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute."""
super()._validate_estimator()
self.base_sampler_ = RandomUnderSampler(
sampling_strategy=self.sampling_strategy,
replacement=self.replacement,
)
def _make_sampler_estimator(self, append=True, random_state=None):
"""Make and configure a copy of the `base_estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.base_estimator_)
estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
sampler = clone(self.base_sampler_)
if random_state is not None:
_set_random_states(estimator, random_state)
_set_random_states(sampler, random_state)
if append:
self.estimators_.append(estimator)
self.samplers_.append(sampler)
self.pipelines_.append(
make_pipeline(deepcopy(sampler), deepcopy(estimator))
)
return estimator, sampler
def _boost_real(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator, sampler = self._make_sampler_estimator(random_state=random_state)
X_res, y_res = sampler.fit_resample(X, y)
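        # keep only the boosting weights of the rows retained by the random
        # under-sampler so the base estimator is fit on the balanced subset
        # with its current weights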
sample_weight_res = _safe_indexing(sample_weight, sampler.sample_indices_)
estimator.fit(X_res, y_res, sample_weight=sample_weight_res)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, "classes_", None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1.0, 0.0
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1.0 / (n_classes - 1), 1.0])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (
-1.0
* self.learning_rate
* ((n_classes - 1.0) / n_classes)
* (y_coding * np.log(y_predict_proba)).sum(axis=1)
)
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(
estimator_weight * ((sample_weight > 0) | (estimator_weight < 0))
)
return sample_weight, 1.0, estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator, sampler = self._make_sampler_estimator(random_state=random_state)
X_res, y_res = sampler.fit_resample(X, y)
sample_weight_res = _safe_indexing(sample_weight, sampler.sample_indices_)
estimator.fit(X_res, y_res, sample_weight=sample_weight_res)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, "classes_", None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1.0, 0.0
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1.0 - (1.0 / n_classes):
self.estimators_.pop(-1)
self.samplers_.pop(-1)
self.pipelines_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError(
"BaseClassifier in AdaBoostClassifier "
"ensemble is worse than random, ensemble "
"can not be fit."
)
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
)
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect * (sample_weight > 0))
return sample_weight, estimator_weight, estimator_error
| mit |
apdjustino/DRCOG_Urbansim | urbandeveloper/elasticity_model_2SLS.py | 1 | 6896 | __author__ = 'JMartinez'
import numpy as np, pandas as pd, os
from synthicity.utils import misc
import pysal as py
class elasticity_model(object):
def __init__(self, dset):
self.zones = dset.zones
self.buildings_far = pd.merge(dset.buildings, dset.fars, left_on='far_id', right_index=True)
def estimate_elasticity(self, zones):
dummies = pd.get_dummies(zones.county)
zones = pd.concat([zones, dummies], axis=1)
        zones['avg_far'] = self.buildings_far.groupby('zone_id').far.mean()  # note: use 'far_x' instead if the buildings table already carries a 'far' column and the merge suffixes it
#zones = zones[zones.residential_sqft_zone>0]
#wrook = py.queen_from_shapefile('C:/users/jmartinez/documents/Test Zones/zones_prj_res2.shp')
wqueen = py.queen_from_shapefile(os.path.join(misc.data_dir(),'shapefiles\\zones.shp'))
w = py.weights.weights.W(wqueen.neighbors, wqueen.weights)
x = zones[['zonal_pop','mean_income']]
x = x.apply(np.log1p)
x['ln_jobs_within_30min'] = zones['ln_jobs_within_30min']
x['zone_contains_park'] = zones['zone_contains_park']
x['Arapahoe'] = zones['Arapahoe']
x['Boulder'] = zones['Boulder']
x['Broomfield'] = zones['Broomfield']
x['Clear Creek'] = zones['Clear Creek']
x['Denver'] = zones['Denver']
x['Douglas'] = zones['Douglas']
x['Elbert'] = zones['Elbert']
x['Gilpin'] = zones['Gilpin']
x['Jefferson'] = zones['Jefferson']
x['Weld'] = zones['Weld']
x=x.fillna(0)
x = x.as_matrix()
imat = zones[['ln_avg_nonres_unit_price_zone','avg_far']]
imat = imat.fillna(0)
imat = imat.as_matrix()
yend = zones['ln_avg_unit_price_zone']
yend = yend.fillna(0)
yend = yend.as_matrix()
yend = np.reshape(yend,(zones.shape[0],1))
y = zones['residential_sqft_zone']
y = y.fillna(0)
y = y.apply(np.log1p)
y = y.as_matrix()
y = np.reshape(y,(zones.shape[0],1))
imat_names = ['non_res_price','avg_far']
x_names = ['zonal_pop', 'mean_income', 'ln_jobs_within_30min', 'zone_contains_park','Arapahoe','Boulder','Broomfield','Clear Creek','Denver','Douglas','Elbert','Gilpin','Jefferson','Weld']
yend_name = ['ln_avg_unit_price_zone']
y_name = 'residential_sqft_zone'
reg_2sls = py.spreg.twosls_sp.GM_Lag(y, x, yend=yend, q=imat, w=w, w_lags=2, robust ='white', name_x = x_names, name_q = imat_names, name_y = y_name, name_yend = yend_name)
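        # betas from GM_Lag is assumed to stack [constant, exogenous x (14),
        # endogenous yend, spatial lag of y], so index 15 is the coefficient on
        # ln_avg_unit_price_zone; the demand elasticity below is approximated
        # by the reciprocal of its absolute value (inverse-demand form)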
demand_elasticity = np.absolute(reg_2sls.betas[15])
demand_elasticity = 1/demand_elasticity[0]
#
return demand_elasticity
def estimate_non_res_elasticity(self,zones):
dummies = pd.get_dummies(zones.county)
zones = pd.concat([zones, dummies], axis=1)
        zones['avg_far'] = self.buildings_far.groupby('zone_id').far.mean()  # note: use 'far_x' instead if the buildings table already carries a 'far' column and the merge suffixes it
#zones = zones[zones.non_residential_sqft_zone>0]
####spatial weights matrix#####
#zones = zones.reset_index()
#zone_coord = zones[['zone_id','zonecentroid_x', 'zonecentroid_y']]
#zone_coord = zone_coord.as_matrix()
wqueen = py.queen_from_shapefile(os.path.join(misc.data_dir(),'shapefiles\\zones.shp'))
#w = py.weights.Distance.DistanceBand(zone_coord, threshold = 50000, binary = False)
#w.transform ='r'
#w = py.weights.weights.W(w.neighbors, w.weights)
w = py.weights.weights.W(wqueen.neighbors, wqueen.weights)
x = zones[['zonal_emp','residential_units_zone']]
x = x.apply(np.log1p)
#x['ln_emp_aggsector_within_5min'] = zones['ln_emp_aggsector_within_5min']
#x['zone_contains_park'] = zones['zone_contains_park']
x['percent_younghead'] = zones['percent_younghead']
x['Arapahoe'] = zones['Arapahoe']
x['Boulder'] = zones['Boulder']
x['Broomfield'] = zones['Broomfield']
x['Clear Creek'] = zones['Clear Creek']
x['Denver'] = zones['Denver']
x['Douglas'] = zones['Douglas']
x['Elbert'] = zones['Elbert']
x['Gilpin'] = zones['Gilpin']
x['Jefferson'] = zones['Jefferson']
x['Weld'] = zones['Weld']
x=x.fillna(0)
x = x.as_matrix()
imat = zones[['ln_avg_unit_price_zone','avg_far']]
imat = imat.fillna(0)
imat = imat.as_matrix()
yend = zones['ln_avg_nonres_unit_price_zone']
yend = yend.fillna(0)
yend = yend.as_matrix()
yend = np.reshape(yend,(zones.shape[0],1))
y = zones['non_residential_sqft_zone']
y = y.fillna(0)
y = y.apply(np.log1p)
y = y.as_matrix()
y = np.reshape(y,(zones.shape[0],1))
imat_names = ['res_price','avg_far']
x_names = ['zonal_emp', 'residential_units_zone', 'percent_younghead','Arapahoe','Boulder','Broomfield','Clear Creek', 'Denver', 'Douglas','Elbert','Gilpin','Jefferson','Weld']
yend_name = ['ln_avg_nonres_unit_price_zone']
y_name = 'non_residential_sqft_zone'
reg_2sls = py.spreg.twosls_sp.GM_Lag(y, x, yend=yend, q=imat, w=w, w_lags=2,robust ='white', name_x = x_names, name_q = imat_names, name_y = y_name, name_yend = yend_name)
#
# ######estimation
# x = zones[['zonal_emp','residential_units_zone']]
# x = x.apply(np.log1p)
# #x['ln_emp_aggsector_within_5min'] = zones['ln_emp_aggsector_within_5min']
# #x['zone_contains_park'] = zones['zone_contains_park']
# x['percent_younghead'] = zones['percent_younghead']
# x=x.fillna(0)
# x = x.as_matrix()
#
# imat = zones[['ln_avg_unit_price_zone','ln_avg_land_value_per_sqft_zone','median_year_built']]
# imat = imat.fillna(0)
# imat = imat.as_matrix()
#
# yend = zones['ln_avg_nonres_unit_price_zone']
# yend = yend.fillna(0)
# yend = yend.as_matrix()
# yend = np.reshape(yend,(zones.shape[0],1))
#
# y = zones['non_residential_sqft_zone']
# y = y.fillna(0)
# y = y.apply(np.log1p)
# y = y.as_matrix()
# y = np.reshape(y,(zones.shape[0],1))
#
#
# imat_names = ['res_price','land_value','median_year_built']
# x_names = ['zonal_emp', 'residential_units_zone', 'percent_younghead']
# yend_name = ['ln_avg_nonres_unit_price_zone']
# y_name = 'non_residential_sqft_zone'
#
# reg_2sls = py.spreg.twosls_sp.GM_Lag(y, x, yend=yend, q=imat, w=w, robust ='white', name_x = x_names, name_q = imat_names, name_y = y_name, name_yend = yend_name)
#
#
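        # as above, 1 constant + 13 exogenous regressors is assumed to put the
        # endogenous non-residential price coefficient at index 14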
demand_elasticity = np.absolute(reg_2sls.betas[14])
demand_elasticity = 1/demand_elasticity[0]
#
return demand_elasticity
| agpl-3.0 |
e-matteson/pipit-keyboard | extras/audio/make_audio_files.py | 1 | 2652 | #!/bin/python2
from __future__ import division
import subprocess
from time import sleep
import os
# for generating sound files
import numpy as np
import matplotlib.pyplot as plt
import scipy.io.wavfile
import scipy.signal as sig
import scipy.stats as stats
master_volume = 1
sounds = {
'A':{'filename':'tick1.wav',
'volume': .9,
'freq': 10,
'length': .01,
'quality': 1,
'tone':'sawtooth',
'a': 2,
'b': 10,},
'W':{'filename':'tick2.wav',
'volume': .8,
'freq': 5,
'length': .01,
'quality': .8,
'tone':'sawtooth',
'a': 2,
'b': 5,},
'M':{'filename':'tick3.wav',
'volume': .8,
'freq': 10,
'length': .05,
'quality': .95,
'tone':'sawtooth',
'a': 2,
'b': 5,},
'S':{'filename':'tick4.wav',
'volume': .4,
'freq': 50,
'length': .04,
'quality': .6,
'tone':'sawtooth',
'a': 2,
'b': 5,},
'U':{'filename':'tick5.wav',
'volume': .5,
'freq': 40,
'length': .02,
'quality': .9,
'tone':'sawtooth',
'a': 2,
'b': 5,},
}
def construct_sound(params, plot_sound=False):
print "constructing sound: %s" % params['filename']
rate = 44100
N = int(rate*params['length'])
time = range(N)
if params['tone'] == 'sawtooth':
raw = sig.sawtooth(np.linspace(0,params['freq'],N))
elif params['tone'] == 'sine':
# not succesfully tested, try plotting
raw = np.sin(np.linspace(0,params['freq'],N))
else:
raise RuntimeError('unknown tone type')
noise = np.random.uniform(-1, 1, N) # 44100 random samples between -1 and 1
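    # shape the tick with a beta-distribution pdf over [0, 1): a < b skews the
    # energy toward the start (fast attack, longer decay), while 'quality'
    # below sets the tone-to-noise mix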
envelope = stats.beta(params['a'],params['b']).pdf([n/N for n in time])
data = raw*params['quality'] + noise*(1-params['quality'])
data *= envelope
save_wav(data, params['filename'], params['volume'])
if plot_sound:
        plt.figure()
plt.plot(time, raw)
plt.plot(time, envelope)
plt.plot(time, data)
plt.show()
def save_wav(data, filename, volume=1):
total_volume = volume * master_volume
if total_volume > 1 or total_volume < 0:
raise RuntimeError('volume out of range')
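    # normalise to [-1, 1], apply the volume, and scale to the int16 range
    # expected by scipy.io.wavfile.write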
scaled_data = np.int16(data/np.max(np.abs(data)) * total_volume * 32767)
scipy.io.wavfile.write(filename, 44100, scaled_data)
def main():
data = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] * 10000
save_wav(data, "test.wav")
# for code in sounds.keys():
# construct_sound(sounds[code])
main()
| gpl-3.0 |
arjunkhode/ASP | lectures/03-Fourier-properties/plots-code/symmetry-real-even.py | 26 | 1150 | import matplotlib.pyplot as plt
import numpy as np
import sys
import math
from scipy.signal import triang
from scipy.fftpack import fft, fftshift
M = 127
N = 128
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
x = triang(M)
fftbuffer = np.zeros(N)
fftbuffer[:hM1] = x[hM2:]
fftbuffer[N-hM2:] = x[:hM2]
X = fftshift(fft(fftbuffer))
mX = abs(X)
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 4))
plt.subplot(311)
plt.title('x[n]')
plt.plot(np.arange(-hM2, hM1, 1.0), x, 'b', lw=1.5)
plt.axis([-hM2, hM1, 0, 1])
plt.subplot(323)
plt.title('real(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.real(X), 'r', lw=1.5)
plt.axis([-N/2, N/2, min(np.real(X)), max(np.real(X))])
plt.subplot(324)
plt.title('im(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.imag(X), 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(325)
plt.title('abs(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,min(mX),max(mX)])
plt.subplot(326)
plt.title('angle(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), pX, 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.tight_layout()
plt.savefig('symmetry-real-even.png')
plt.show()
| agpl-3.0 |
alexlib/openpiv-python | setup.py | 2 | 1786 | from os import path
from setuptools import setup, find_packages
# read the contents of your README file
this_directory = path.abspath(path.dirname(__file__))
# with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="OpenPIV",
version='0.23.6',
packages=find_packages(),
include_package_data=True,
long_description=long_description,
long_description_content_type='text/markdown',
setup_requires=[
'setuptools',
],
install_requires=[
'numpy',
'imageio',
'matplotlib>=3',
'scikit-image',
'scipy',
'natsort',
'GitPython',
'pytest',
'tqdm'
],
classifiers=[
# PyPI-specific version type. The number specified here is a magic
# constant
# with no relation to this application's version numbering scheme.
# *sigh*
'Development Status :: 4 - Beta',
# Sublist of all supported Python versions.
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Sublist of all supported platforms and environments.
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
# Miscellaneous metadata.
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
],
# long_description=long_description,
# long_description_content_type='text/markdown'
)
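# Editor's note: with this setup script, the package is typically installed for
# local development via `pip install -e .` from the repository root, or built
# and published with the usual setuptools workflow (assumption: standard
# tooling; these commands are not stated in the original file).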
| gpl-3.0 |
evgchz/scikit-learn | sklearn/linear_model/ransac.py | 16 | 13870 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
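# Editor's note -- a worked instance of the formula above (illustrative, not
# part of the original scikit-learn source): with n_inliers=70, n_samples=100,
# min_samples=2 and probability=0.99 the inlier ratio is 0.7, so
#     N >= log(1 - 0.99) / log(1 - 0.7**2) = log(0.01) / log(0.51) ~= 6.84
# and _dynamic_max_trials(70, 100, 2, 0.99) returns ceil(6.84) == 7.0.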
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
if y.ndim == 1:
y = y.reshape(-1, 1)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
        try: # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
if y_pred.ndim == 1:
y_pred = y_pred[:, None]
residuals_subset = residual_metric(y_pred - y)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
return self.estimator_.score(X, y)
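# --- Editor's usage sketch (illustrative only; not part of this module) ------
# A minimal end-to-end run of the estimator above on synthetic data, assuming
# only numpy and this module are importable. The sample sizes, the slope of 2.5
# and the choice of corrupting the first ten targets are arbitrary.
if __name__ == "__main__":
    demo_rng = np.random.RandomState(0)
    X_demo = demo_rng.uniform(-3, 3, size=(60, 1))
    y_demo = 2.5 * X_demo.ravel() + demo_rng.normal(scale=0.1, size=60)
    y_demo[:10] += 10.0  # turn the first ten samples into outliers
    model = RANSACRegressor(random_state=0)
    model.fit(X_demo, y_demo)
    print("slope=%r inliers=%d" % (model.estimator_.coef_.ravel()[0],
                                   int(model.inlier_mask_.sum())))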
| bsd-3-clause |
openworm/tracker-commons | src/Python/wcon/wcon_parser.py | 3 | 28807 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods
------------
reject_duplicates
Classes
------------
WCONWorms
"""
import six
import warnings
from collections import OrderedDict
from six import StringIO
from os import path
import os
import shutil
import json
import jsonschema
import zipfile
import numpy as np
import pandas as pd
idx = pd.IndexSlice
from .wcon_data import parse_data, convert_origin
from .wcon_data import df_upsert, data_as_array
from .wcon_data import get_sorted_ordered_dict
from .wcon_data import reverse_backwards_worms, sort_odict
from .measurement_unit import MeasurementUnit
class WCONWorms():
"""
A set of worm tracker data for one or more worms, as specified by
the WCON standard.
Attributes
-------------
units: dict
May be empty, but is never None since 'units' is required
to be specified.
metadata: dict
If 'metadata' was not specified, metadata is None.
The values in this dict might be nested into further dicts or other
data types.
_data: dictionary of Pandas DataFrames [private]
num_worms: int [property]
data_as_dict: dict [property]
data: DataFrame if num_worms == 1 else dict of DataFrames [property]
[Note: the "files" key is not persisted unless the .load
factory method is used.]
Public-Facing Methods
-------------
load_from_file (JSON_path) [class method]
save_to_file (JSON_path, pretty_print)
to_canon [property]
__add__ [use "+"]
__eq__ [use "=="]
Usage
-------------
# From a string literal:
from io import StringIO
w2 = WCONWorms.load(StringIO('{"units":{"t":"s","x":"mm","y":"mm"}, '
'"data":[]}'))
# WCONWorms.load_from_file accepts any valid WCON, but .save_to_file
# output is always "canonical" WCON, which makes specific choices about
# how to arrange and format the WCON file. This way the functional
# equality of any two WCON files can be tested by this:
w1 = WCONWorms.load_from_file('file1.wcon')
w2 = WCONWorms.load_from_file('file2.wcon')
assert(w1 == w2)
# or:
w1.save_to_file('file1.wcon')
w2.save_to_file('file2.wcon')
import filecmp
assert(filecmp.cmp('file1.wcon', file2.wcon'))
Custom WCON versions
--------------------
Any top-level key other than the basic:
- files
- units
- metadata
- data
... is ignored. Handling them requires subclassing WCONWorms.
"""
"""
================================================================
Properties
================================================================
"""
basic_keys = ['files', 'units', 'metadata', 'data']
@property
def num_worms(self):
try:
return self._num_worms
except AttributeError:
self._num_worms = len(self.worm_ids)
return self._num_worms
@property
def worm_ids(self):
try:
return self._worm_ids
except AttributeError:
self._worm_ids = list(self._data.keys())
return self._worm_ids
@property
def data(self):
"""
Return all worms as one giant DataFrame. Since this can
be inefficient for sparse multiworm data, it is only "lazily"
calculated, i.e. once requested, not at object initialization
"""
try:
return self._data_df
except AttributeError:
if self.num_worms == 0:
self._data_df = None
else:
# Get a list of all dfs
dfs = list(self._data.values())
l = dfs[0]
# Merge all the worm dfs together into one
for r in dfs[1:]:
l = pd.merge(l, r, left_index=True, right_index=True,
how='outer')
self._data_df = l
return self._data_df
@property
def data_as_odict(self):
"""
Return the native ordered-dict-of-DataFrames, the cheapest
option for sparse multiworm data
"""
return self._data
@property
def schema(self):
try:
return self._schema
except AttributeError:
# Only load _schema if this method gets called. Once
# it's loaded, though, persist it in memory and don't lose it
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "wcon_schema.json"), "r") as f:
self._schema = json.loads(f.read())
# Now that the schema has been loaded, we can try again
return self._schema
@classmethod
def validate_from_schema(cls, wcon_string):
jsonschema.validate(json.load(StringIO(wcon_string)), cls().schema)
@property
def canonical_units(self):
"""
A dictionary of canonical versions of the unit for all quantities
"""
return {k: self.units[k].canonical_unit for k in self.units.keys()}
@property
def as_ordered_dict(self):
"""
Return a representation of the worm as an OrderedDict. This is most
useful when saving to a file.
Returns the canonical version of the data, with units in
canonical form, and the data converted to canonical form.
The three keys are:
- 'units'
- 'metadata'
- 'data'
"""
# Not strictly required by JSON but nice to order the four top-level
# keys so we use OrderedDict here instead of dict.
ord_dict = OrderedDict()
# A dictionary of the canonical unit strings for all quantities except
# aspect_size, which is generated at runtime.
units_obj = {k: self.units[k].canonical_unit_string
for k in self.units.keys() if k != 'aspect_size'}
# Sort the units so that every time we save this file, it produces
# exactly the same output. Not required in the JSON standard, but
# nice to have.
units_obj = get_sorted_ordered_dict(units_obj)
ord_dict.update({'units': units_obj})
# The only optional object is "metadata" since "files" is not
# necessary since we don't currently support saving to more than
# one chunk.
if self.metadata:
# Again, sort the metadata (recursively) so that the same file
# is produced each time that can stand up to diffing
metadata_obj = get_sorted_ordered_dict(self.metadata)
ord_dict.update({'metadata': metadata_obj})
canonical = self.to_canon
if canonical._data == {}:
data_arr = []
else:
src = canonical.data_as_odict
data_arr = []
for worm_id in src:
data_arr.extend(data_as_array(src[worm_id]))
ord_dict.update({'data': data_arr})
return ord_dict
"""
================================================================
Comparison Methods
================================================================
"""
@classmethod
def are_units_equal(cls, w1, w2):
"""
Returns
---------
boolean
True if w1.units == w2.units, with the only conversion being
between units that mean the same thing
(e.g. 'mm' and 'millimetres')
False otherwise
"""
if set(w1.units.keys()) != set(w2.units.keys()):
return False
for k in w1.units.keys():
if w1.units[k] != w2.units[k]:
return False
return True
@classmethod
def is_metadata_equal(cls, w1, w2):
"""
Returns
----------
boolean
True if w1.metadata == w2.metadata
"""
return w1.metadata == w2.metadata
@classmethod
def is_data_equal(cls, w1, w2, convert_units=True):
"""
Parameters
-------------
w1, w2: WCONWorms objects
The objects whose .data attributes will be compared
convert_units: bool
If True, the data will first be converted to a standard form
so that if one worm uses millimetres and the other metres, the
data can still be properly compared
TODO:
Add a "threshold" parameter so that perfect equality is not
the only option
"""
# import pdb; pdb.set_trace()
if w1.num_worms != w2.num_worms:
return False
if convert_units:
d1 = w1.to_canon._data
d2 = w2.to_canon._data
else:
d1 = w1._data
d2 = w2._data
for worm_id in w1.worm_ids:
try:
df1 = d1[worm_id]
except KeyError:
df1 = None
try:
df2 = d2[worm_id]
except KeyError:
df2 = None
if (df1 is None) ^ (df2 is None):
# If one is None but the other is not (XOR), data is not equal
return False
elif df1 is None and df2 is None:
# If both None, they are equal
continue
if not pd_equals(df1, df2):
return False
return True
def __eq__(self, other):
"""
Comparison operator (overloaded)
Equivalent to .is_data_equal and .is_metadata_equal
Units are converted
Special units are not considered
"""
return (WCONWorms.is_data_equal(self, other) and
WCONWorms.is_metadata_equal(self, other))
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
"""
Addition operator (overloaded)
"""
return self.merge(self, other)
@property
def is_canon(self):
"""
Returns whether all units are already in their canonical forms.
"""
for data_key in self.units:
mu = self.units[data_key]
if mu.unit_string != mu.canonical_unit_string:
return False
return True
@property
def to_canon(self):
"""
Return a new WCONWorms object, with the same .metadata, but with
.units and .data changed so they are in standard form.
"""
w = WCONWorms()
w.metadata = self.metadata
w.units = self.canonical_units
# Corner case
if self._data == {}:
w._data = OrderedDict({})
return w
w._data = OrderedDict()
for worm_id in self.worm_ids:
w._data[worm_id] = self._data[worm_id].copy()
# Go through each "units" key
for data_key in self.units:
mu = self.units[data_key]
# Don't bother to "convert" units that are already in their
# canonical form.
if mu.unit_string == mu.canonical_unit_string:
continue
tmu = self.units['t']
for worm_id in w.worm_ids:
try:
# Apply across all worm ids and all aspects
mu_slice = \
w._data[worm_id].loc[:, idx[:, data_key, :]].copy()
w._data[worm_id].loc[:, idx[:, data_key, :]] = \
mu_slice.applymap(mu.to_canon)
except KeyError:
# Just ignore cases where there are "units" entries but no
# corresponding data
pass
# Special case: change the dataframe index, i.e. the time units
if tmu.unit_string != tmu.canonical_unit_string:
# Create a function that can be applied elementwise to the
# index values
t_converter = np.vectorize(tmu.to_canon)
new_index = t_converter(w._data[worm_id].index.values)
w._data[worm_id].set_index(new_index, inplace=True)
return w
@classmethod
def merge(cls, w1, w2):
"""
Merge two worm groups, in their standard forms.
Units can differ, but not in their standard forms.
Metadata must be identical.
Data can overlap, as long as it does not clash.
Clashes are checked at a low level of granularity:
e.g. if two worms have different metadata but the individual metadata
entries do not conflict, this method will still fail and raise an
AssertionError.
"""
if not cls.is_metadata_equal(w1, w2):
raise AssertionError("Metadata conflicts between worms to be "
"merged.")
w1c = w1.to_canon
w2c = w2.to_canon
for worm_id in w2c.worm_ids:
if worm_id in w1c.worm_ids:
try:
# Try to upsert w2c's data into w1c. If we cannot
# without an error being raised, the data clashes.
w1c._data[worm_id] = df_upsert(w1c._data[worm_id],
w2c._data[worm_id])
except AssertionError as err:
raise AssertionError("Data conflicts between worms to "
"be merged on worm {0}: {1}"
.format(str(worm_id), err))
else:
# The worm isn't in the 1st group, so just add it
w1c._data[worm_id] = w2c._data[worm_id]
# Sort w1c's list of worms
w1c._data = sort_odict(w1c._data)
# Create a fresh WCONWorms object to reset all the lazily-evaluated
# properties that may change, such as num_worms, in the merged worm
merged_worm = WCONWorms()
merged_worm._data = w1c._data
merged_worm.metadata = w2c.metadata
merged_worm.units = w1c.units
return merged_worm
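    # Editor's note (illustrative only; not in the original source): merge() is
    # what backs the "+" operator used when combining chunked WCON files, e.g.
    #     combined = WCONWorms.load_from_file('chunk_1.wcon') + \
    #                WCONWorms.load_from_file('chunk_2.wcon')
    # where the two (hypothetical) chunk files must share identical metadata
    # and non-conflicting data.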
"""
================================================================
Load / save methods
================================================================
"""
@classmethod
def validate_filename(cls, JSON_path, is_zipped):
"""
Perform simple checks on the file path
JSON_path: str
The path to the file to be evaluated
is_zipped: bool
Whether or not the path is for a zip archive
"""
assert(isinstance(JSON_path, six.string_types))
assert(len(JSON_path) > 0)
if is_zipped:
if JSON_path[-4:].upper() != '.ZIP':
raise Exception("A zip archive like %s must have an "
"extension ending in '.zip'" % JSON_path)
else:
# delete the '.zip' part so the rest can be validated
JSON_path = JSON_path[:-4]
        warning_message = (' is either less than 5 characters, '
'consists of only the extension ".WCON", or '
'does not end in ".WCON", the recommended '
'file extension.')
if len(JSON_path) <= 5 or JSON_path[-5:].upper() != '.WCON':
if is_zipped:
warnings.warn('Zip file ends properly in .zip, but the '
'prefix' + warning_message)
else:
warnings.warn('The file name ' + warning_message)
def save_to_file(self, JSON_path, pretty_print=False,
compress_file=False, num_chunks=1):
"""
Save this object to the path specified. The object
will be serialized as a WCON JSON text file.
Parameters
-----------
JSON_path: str
The path to save this object to. A warning is raised if the path
does not end in ".WCON"
pretty_print: bool
If True, adds newlines and spaces to make the file more human-
readable. Otherwise, the JSON output will use as few characters
as possible.
compress_file: bool
If True, saves a compressed version of the WCON JSON text file
num_chunks: int
The number of chunks to break this object into. If
num_chunks > 1 then num_chunks files will be created.
Filenames will have "_1", "_2", etc., added
to the end of the filename after the last path separator
(e.g. "/") and then, before the last "." (if any)
"""
if num_chunks > 1:
raise NotImplementedError("Saving a worm to more than one chunk "
"has not yet been implemented")
self.validate_filename(JSON_path, compress_file)
with open(JSON_path, 'w') as outfile:
json.dump(self.as_ordered_dict, outfile,
indent=4 if pretty_print else None)
if compress_file:
# Zip the file to a TEMP file, then rename to the original,
# overwriting it with the zipped archive.
zf = zipfile.ZipFile(JSON_path + '.TEMP',
'w', zipfile.ZIP_DEFLATED)
zf.write(JSON_path)
zf.close()
os.rename(JSON_path + '.TEMP', JSON_path)
@classmethod
def load_from_file(cls, JSON_path,
load_prev_chunks=True,
load_next_chunks=True,
validate_against_schema=True):
"""
Factory method returning a merged WCONWorms instance of the file
located at JSON_path and all related "chunks" as specified in the
"files" element of the file.
Uses recursion if there are multiple chunks.
Parameters
-------------
JSON_path: str
A file path to a file that can be opened
validate_against_schema: bool
If True, validate before trying to load the file, otherwise don't.
jsonschema.validate takes 99% of the compute time for large files
so use with caution.
load_prev_chunks: bool
If a "files" key is present, load the previous chunks and merge
them with this one. If not present, return only the current
file's worm.
load_next_chunks: bool
If a "files" key is present, load the next chunks and merge
them with this one. If not present, return only the current
file's worm.
"""
print("Loading file: " + JSON_path)
is_zipped = zipfile.is_zipfile(JSON_path)
cls.validate_filename(JSON_path, is_zipped)
# Check if the specified file is compressed
if is_zipped:
zf = zipfile.ZipFile(JSON_path, 'r')
zf_namelist = zf.namelist()
if len(zf_namelist) <= 0:
raise Exception("Filename %s is a zip archive, which is fine, "
"but the archive does not contain any files.")
elif len(zf_namelist) == 1:
# Just one file is in the archive.
print("The file is a zip archive with one file. Attempting "
"to uncompress and then load.")
wcon_bytes = zf.read(zf.namelist()[0])
wcon_string = wcon_bytes.decode("utf-8")
infile = StringIO(wcon_string)
w_current = cls.load(infile, validate_against_schema)
else:
print("The zip archive contains multiple files. We will "
"extract to a temporary folder and then try to load "
"the first file in the archive, then delete the "
"temporary folder.")
# Note: the first file is all we should need since we assume
# the files in the archive are linked together using
# their respective JSON "files" entries
# Make a temporary archive folder
cur_path = os.path.abspath(os.path.dirname(JSON_path))
archive_path = os.path.join(cur_path, '_zip_archive')
if os.path.exists(archive_path):
raise Exception("Archive path %s already exists!"
% archive_path)
else:
os.makedirs(archive_path)
# Extract zip archive to temporary folder
for name in zf_namelist:
zf.extract(name, archive_path)
zf.close()
# Call load_from_file on the first file
first_path = os.path.join(archive_path, zf_namelist[0])
w = cls.load_from_file(first_path)
# Delete the temporary folder
shutil.rmtree(archive_path, ignore_errors=True)
return w
else:
# The file is not a zip file, so assume it's just plaintext JSON
with open(JSON_path, 'r') as infile:
w_current = cls.load(infile, validate_against_schema)
# CASE 1: NO "files" OBJECT, hence no multiple files. We are done.
w_cur = w_current
if w_current.files is None:
return w_current
        elif (('next' not in w_cur.files) and ('prev' not in w_cur.files)):
# CASE 2: "files" object exists but no prev/next, assume nothing is
# there
return w_current
else:
# The merge operations below will blast away the .files attribute
# so we need to save a local copy
current_files = w_current.files
# OTHERWISE, CASE 2: MULTIPLE FILES
# The schema guarantees that if "files" is present,
# "current", will exist. Also, that "current" is not
# null and whose corresponding value is a string at least one
# character in length.
cur_ext = current_files['current']
# e.g. cur_filename = 'filename_2.wcon'
# cur_ext = '_2', prefix = 'filename', suffix = '.wcon'
cur_filename = JSON_path
name_offset = cur_filename.find(cur_ext)
if name_offset == -1:
raise AssertionError(
'Mismatch between the filename given in the file "' +
cur_ext +
'" and the file we loaded from "' +
cur_filename +
'".')
path_string = cur_filename[:name_offset]
load_chunks = {'prev': load_prev_chunks,
'next': load_next_chunks}
for direction in ['prev', 'next']:
# If we are supposed to load the previous chunks, and one exists,
# load it and merge it with the current chunk
# Same with the "next" chunks
if (load_chunks[direction] and
current_files is not None and
current_files[direction] is not None and
len(current_files[direction]) > 0):
cur_load_prev_chunks = (direction == 'prev')
cur_load_next_chunks = (direction == 'next')
new_file_name = path_string + current_files[direction][0]
w_new = cls.load_from_file(new_file_name,
cur_load_prev_chunks,
cur_load_next_chunks,
validate_against_schema)
w_current = w_current + w_new
# If no merging took place, we'll still need to delete the "files"
# attribute if it's present (i.e. if both "prev" and "next" were null):
if hasattr(w_current, "files"):
del(w_current.files)
return w_current
@classmethod
def load(cls, JSON_stream, validate_against_schema=True):
"""
Factory method to create a WCONWorms instance
This does NOT load chunks, because a file stream does not
have a file name. In order to load chunks, you must invoke the
factory method load_from_file. You will be passing it a file path
from which it can find the other files/chunks.
Parameters
-------------
JSON_stream: a text stream implementing .read()
e.g. an object inheriting from TextIOBase
validate_against_schema: bool
If True, validate before trying to load the file, otherwise don't.
jsonschema.validate takes 99% of the compute time for large files
so use with caution.
"""
w = cls()
serialized_data = JSON_stream.read()
# Load the whole JSON file into a nested dict. Any duplicate
# keys raise an exception since we've hooked in reject_duplicates
root = json.loads(serialized_data, object_pairs_hook=reject_duplicates)
# ===================================================
# BASIC TOP-LEVEL VALIDATION AGAINST THE SCHEMA
# Validate the raw file against the WCON schema
if validate_against_schema:
jsonschema.validate(root, w.schema)
# ===================================================
# HANDLE THE REQUIRED ELEMENTS: 'units', 'data'
w.units = root['units']
for key in w.units:
w.units[key] = MeasurementUnit.create(w.units[key])
# The only data key without units should be aspect_size, since it's
# generated during the construction of the pandas dataframe
# it is a dimensionless quantity
w.units['aspect_size'] = MeasurementUnit.create('')
if len(root['data']) > 0:
w._data = parse_data(root['data'])
# Shift the coordinates by the amount in the offsets 'ox' and 'oy'
for worm_id in w.worm_ids:
convert_origin(w._data[worm_id])
# Any worms with head=='R' should have their
# coordinates reversed and head reset to 'L'
reverse_backwards_worms(w._data[worm_id])
else:
# "data": {}
w._data = OrderedDict({})
# Raise error if there are any data keys without units
units_keys = set(w.units.keys())
for worm_id in w._data:
df = w._data[worm_id]
if df is None:
data_keys = set()
else:
data_keys = set(df.columns.get_level_values(1))
# "head" and "ventral" don't require units.
keys_missing_units = data_keys - \
units_keys - set(['head', 'ventral'])
if keys_missing_units != set():
raise AssertionError('In worm ' + str(worm_id) + ', the '
'following data keys are missing '
'entries in the "units" object: ' +
str(keys_missing_units))
# ===================================================
# HANDLE THE OPTIONAL ELEMENTS: 'files', 'metadata'
if 'files' in root:
w.files = root['files']
# Handle the case of a single 'next' or 'prev' entry, by
# wrapping it in an array, so we can reliably assume that
# entries are always wrapped in arrays.
for direction in ['next', 'prev']:
                # w.files is a plain dict parsed from JSON, so use key access
                # (not attribute access) when wrapping a lone string in a list.
                if (direction in w.files and
                        isinstance(w.files[direction], six.string_types)):
                    w.files[direction] = [w.files[direction]]
else:
w.files = None
if 'metadata' in root:
w.metadata = root['metadata']
else:
w.metadata = None
return w
def pd_equals(df1, df2):
"""
I don't use DataFrame.equals because it returned False for no
apparent reason with one of the centroid unit tests
"""
if not df1.columns.identical(df2.columns):
return False
if not df1.index.identical(df2.index):
return False
try:
pd.util.testing.assert_frame_equal(df1, df2)
except AssertionError:
return False
return True
def reject_duplicates(ordered_pairs):
"""Reject duplicate keys."""
unique_dict = {}
for key, val in ordered_pairs:
if key in unique_dict:
raise KeyError("Duplicate key: %r" % (key,))
else:
unique_dict[key] = val
return unique_dict
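# Editor's note -- a small illustration of the hook above (assumption: run in a
# session where reject_duplicates is importable; not part of the original file):
#
#     >>> import json
#     >>> json.loads('{"a": 1, "a": 2}', object_pairs_hook=reject_duplicates)
#     Traceback (most recent call last):
#         ...
#     KeyError: "Duplicate key: 'a'"
#
# A plain json.loads call would silently keep the last value; treating the
# duplicate as an error is what lets WCON loading reject malformed files.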
| mit |
winklerand/pandas | pandas/tests/test_resample.py | 1 | 135497 | # pylint: disable=E1101
from warnings import catch_warnings
from datetime import datetime, timedelta
from functools import partial
from textwrap import dedent
import pytz
import pytest
import dateutil
import numpy as np
import pandas as pd
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import (Series, DataFrame, Panel, Index, isna,
notna, Timestamp)
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
from pandas.compat import range, lrange, zip, product, OrderedDict
from pandas.core.base import SpecificationError, AbstractMethodError
from pandas.errors import UnsupportedFunctionCall
from pandas.core.groupby import DataError
from pandas._libs.tslibs.resolution import DAYS
from pandas.tseries.frequencies import MONTHS
from pandas.tseries.frequencies import to_offset
from pandas.core.indexes.datetimes import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.core.indexes.period import period_range, PeriodIndex, Period
from pandas.core.resample import (DatetimeIndex, TimeGrouper,
DatetimeIndexResampler)
from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
from pandas._libs.period import IncompatibleFrequency
bday = BDay()
# The various methods we support
downsample_methods = ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem',
'median', 'prod', 'var', 'ohlc']
upsample_methods = ['count', 'size']
series_methods = ['nunique']
resample_methods = downsample_methods + upsample_methods + series_methods
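# Editor's note: the method names above are exercised against Resampler objects
# in the tests below, e.g. getattr(s.resample('D'), 'sum')() is equivalent to
# s.resample('D').sum(); the downsample/upsample/series-only split only controls
# which groups of tests a given method participates in.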
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestResampleAPI(object):
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
self.frame = DataFrame(
{'A': self.series, 'B': self.series, 'C': np.arange(len(dti))})
def test_str(self):
r = self.series.resample('H')
assert ('DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, '
'label=left, convention=start, base=0]' in str(r))
def test_api(self):
r = self.series.resample('H')
result = r.mean()
assert isinstance(result, Series)
assert len(result) == 217
r = self.series.to_frame().resample('H')
result = r.mean()
assert isinstance(result, DataFrame)
assert len(result) == 217
def test_api_changes_v018(self):
# change from .resample(....., how=...)
# to .resample(......).how()
r = self.series.resample('H')
assert isinstance(r, DatetimeIndexResampler)
for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H', how=how)
expected = getattr(self.series.resample('H'), how)()
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H', how='ohlc')
expected = self.series.resample('H').ohlc()
tm.assert_frame_equal(result, expected)
# compat for pandas-like methods
for how in ['sort_values', 'isna']:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(r, how)()
# invalids as these can be setting operations
r = self.series.resample('H')
pytest.raises(ValueError, lambda: r.iloc[0])
pytest.raises(ValueError, lambda: r.iat[0])
pytest.raises(ValueError, lambda: r.loc[0])
pytest.raises(ValueError, lambda: r.loc[
Timestamp('2013-01-01 00:00:00', offset='H')])
pytest.raises(ValueError, lambda: r.at[
Timestamp('2013-01-01 00:00:00', offset='H')])
def f():
r[0] = 5
pytest.raises(ValueError, f)
# str/repr
r = self.series.resample('H')
with tm.assert_produces_warning(None):
str(r)
with tm.assert_produces_warning(None):
repr(r)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
tm.assert_numpy_array_equal(np.array(r), np.array(r.mean()))
# masquerade as Series/DataFrame as needed for API compat
assert isinstance(self.series.resample('H'), ABCSeries)
assert not isinstance(self.frame.resample('H'), ABCSeries)
assert not isinstance(self.series.resample('H'), ABCDataFrame)
assert isinstance(self.frame.resample('H'), ABCDataFrame)
# bin numeric ops
for op in ['__add__', '__mul__', '__truediv__', '__div__', '__sub__']:
if getattr(self.series, op, None) is None:
continue
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(2), Series)
# unary numeric ops
for op in ['__pos__', '__neg__', '__abs__', '__inv__']:
if getattr(self.series, op, None) is None:
continue
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(), Series)
# comparison ops
for op in ['__lt__', '__le__', '__gt__', '__ge__', '__eq__', '__ne__']:
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(2), Series)
# IPython introspection shouldn't trigger warning GH 13618
for op in ['_repr_json', '_repr_latex',
'_ipython_canary_method_should_not_exist_']:
r = self.series.resample('H')
with tm.assert_produces_warning(None):
getattr(r, op, None)
# getitem compat
df = self.series.to_frame('foo')
# same as prior versions for DataFrame
pytest.raises(KeyError, lambda: df.resample('H')[0])
# compat for Series
# but we cannot be sure that we need a warning here
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H')[0]
expected = self.series.resample('H').mean()[0]
assert result == expected
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H')['2005-01-09 23:00:00']
expected = self.series.resample('H').mean()['2005-01-09 23:00:00']
assert result == expected
def test_groupby_resample_api(self):
# GH 12448
# .groupby(...).resample(...) hitting warnings
# when appropriate
df = DataFrame({'date': pd.date_range(start='2016-01-01',
periods=4,
freq='W'),
'group': [1, 1, 2, 2],
'val': [5, 6, 7, 8]}).set_index('date')
# replication step
i = pd.date_range('2016-01-03', periods=8).tolist() + \
pd.date_range('2016-01-17', periods=8).tolist()
index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i],
names=['group', 'date'])
expected = DataFrame({'val': [5] * 7 + [6] + [7] * 7 + [8]},
index=index)
result = df.groupby('group').apply(
lambda x: x.resample('1D').ffill())[['val']]
assert_frame_equal(result, expected)
def test_groupby_resample_on_api(self):
# GH 15021
# .groupby(...).resample(on=...) results in an unexpected
# keyword warning.
df = DataFrame({'key': ['A', 'B'] * 5,
'dates': pd.date_range('2016-01-01', periods=10),
'values': np.random.randn(10)})
expected = df.set_index('dates').groupby('key').resample('D').mean()
result = df.groupby('key').resample('D', on='dates').mean()
assert_frame_equal(result, expected)
@td.skip_if_no_mpl
def test_plot_api(self):
# .resample(....).plot(...)
# hitting warnings
# GH 12448
s = Series(np.random.randn(60),
index=date_range('2016-01-01', periods=60, freq='1min'))
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s.resample('15min').plot()
tm.assert_is_valid_plot_return_object(result)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s.resample('15min', how='sum').plot()
tm.assert_is_valid_plot_return_object(result)
def test_getitem(self):
r = self.frame.resample('H')
tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)
r = self.frame.resample('H')['B']
assert r._selected_obj.name == self.frame.columns[1]
# technically this is allowed
r = self.frame.resample('H')['A', 'B']
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[0, 1]])
r = self.frame.resample('H')['A', 'B']
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[0, 1]])
def test_select_bad_cols(self):
g = self.frame.resample('H')
pytest.raises(KeyError, g.__getitem__, ['D'])
pytest.raises(KeyError, g.__getitem__, ['A', 'D'])
with tm.assert_raises_regex(KeyError, '^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[['A', 'D']]
def test_attribute_access(self):
r = self.frame.resample('H')
tm.assert_series_equal(r.A.sum(), r['A'].sum())
# getting
pytest.raises(AttributeError, lambda: r.F)
# setting
def f():
r.F = 'bah'
pytest.raises(ValueError, f)
def test_api_compat_before_use(self):
# make sure that we are setting the binner
# on these attributes
for attr in ['groups', 'ngroups', 'indices']:
rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = Series(np.arange(len(rng)), index=rng)
rs = ts.resample('30s')
# before use
getattr(rs, attr)
# after grouper is initialized is ok
rs.mean()
getattr(rs, attr)
def tests_skip_nuisance(self):
df = self.frame
df['D'] = 'foo'
r = df.resample('H')
result = r[['A', 'B']].sum()
expected = pd.concat([r.A.sum(), r.B.sum()], axis=1)
assert_frame_equal(result, expected)
expected = r[['A', 'B', 'C']].sum()
result = r.sum()
assert_frame_equal(result, expected)
def test_downsample_but_actually_upsampling(self):
# this is reindex / asfreq
rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = Series(np.arange(len(rng), dtype='int64'), index=rng)
result = ts.resample('20s').asfreq()
expected = Series([0, 20, 40, 60, 80],
index=pd.date_range('2012-01-01 00:00:00',
freq='20s',
periods=5))
assert_series_equal(result, expected)
def test_combined_up_downsampling_of_irregular(self):
        # since we are really doing an operation like this
# ts2.resample('2s').mean().ffill()
# preserve these semantics
rng = pd.date_range('1/1/2012', periods=100, freq='S')
ts = Series(np.arange(len(rng)), index=rng)
ts2 = ts.iloc[[0, 1, 2, 3, 5, 7, 11, 15, 16, 25, 30]]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = ts2.resample('2s', how='mean', fill_method='ffill')
expected = ts2.resample('2s').mean().ffill()
assert_series_equal(result, expected)
def test_transform(self):
r = self.series.resample('20min')
expected = self.series.groupby(
pd.Grouper(freq='20min')).transform('mean')
result = r.transform('mean')
assert_series_equal(result, expected)
def test_fillna(self):
# need to upsample here
rng = pd.date_range('1/1/2012', periods=10, freq='2S')
ts = Series(np.arange(len(rng), dtype='int64'), index=rng)
r = ts.resample('s')
expected = r.ffill()
result = r.fillna(method='ffill')
assert_series_equal(result, expected)
expected = r.bfill()
result = r.fillna(method='bfill')
assert_series_equal(result, expected)
with pytest.raises(ValueError):
r.fillna(0)
def test_apply_without_aggregation(self):
# both resample and groupby should work w/o aggregation
r = self.series.resample('20min')
g = self.series.groupby(pd.Grouper(freq='20min'))
for t in [g, r]:
result = t.apply(lambda x: x)
assert_series_equal(result, self.series)
def test_agg_consistency(self):
# make sure that we are consistent across
# similar aggregations with and w/o selection list
df = DataFrame(np.random.randn(1000, 3),
index=pd.date_range('1/1/2012', freq='S', periods=1000),
columns=['A', 'B', 'C'])
r = df.resample('3T')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'})
result = r.agg({'r1': 'mean', 'r2': 'sum'})
assert_frame_equal(result, expected)
# TODO: once GH 14008 is fixed, move these tests into
# `Base` test class
def test_agg(self):
# test with all three Resampler apis and TimeGrouper
np.random.seed(1234)
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
index.name = 'date'
df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
df_col = df.reset_index()
df_mult = df_col.copy()
df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
names=['index', 'date'])
r = df.resample('2D')
cases = [
r,
df_col.resample('2D', on='date'),
df_mult.resample('2D', level='date'),
df.groupby(pd.Grouper(freq='2D'))
]
a_mean = r['A'].mean()
a_std = r['A'].std()
a_sum = r['A'].sum()
b_mean = r['B'].mean()
b_std = r['B'].std()
b_sum = r['B'].sum()
expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_product([['A', 'B'],
['mean', 'std']])
for t in cases:
result = t.aggregate([np.mean, np.std])
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, b_std], axis=1)
for t in cases:
result = t.aggregate({'A': np.mean,
'B': np.std})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'std')])
for t in cases:
result = t.aggregate({'A': ['mean', 'std']})
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, a_sum], axis=1)
expected.columns = ['mean', 'sum']
for t in cases:
result = t['A'].aggregate(['mean', 'sum'])
assert_frame_equal(result, expected)
expected = pd.concat([a_mean, a_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum'),
('B', 'mean2'),
('B', 'sum2')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'},
'B': {'mean2': 'mean', 'sum2': 'sum'}})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
for t in cases:
result = t.aggregate({'A': ['mean', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('r1', 'A', 'mean'),
('r1', 'A', 'sum'),
('r2', 'B', 'mean'),
('r2', 'B', 'sum')])
def test_agg_misc(self):
# test with all three Resampler apis and TimeGrouper
np.random.seed(1234)
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
index.name = 'date'
df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
df_col = df.reset_index()
df_mult = df_col.copy()
df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
names=['index', 'date'])
r = df.resample('2D')
cases = [
r,
df_col.resample('2D', on='date'),
df_mult.resample('2D', level='date'),
df.groupby(pd.Grouper(freq='2D'))
]
# passed lambda
for t in cases:
result = t.agg({'A': np.sum,
'B': lambda x: np.std(x, ddof=1)})
rcustom = t['B'].apply(lambda x: np.std(x, ddof=1))
expected = pd.concat([r['A'].sum(), rcustom], axis=1)
assert_frame_equal(result, expected, check_like=True)
# agg with renamers
expected = pd.concat([t['A'].sum(),
t['B'].sum(),
t['A'].mean(),
t['B'].mean()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('result1', 'A'),
('result1', 'B'),
('result2', 'A'),
('result2', 'B')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum),
('result2', np.mean)]))
assert_frame_equal(result, expected, check_like=True)
# agg with different hows
expected = pd.concat([t['A'].sum(),
t['A'].std(),
t['B'].mean(),
t['B'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
for t in cases:
result = t.agg(OrderedDict([('A', ['sum', 'std']),
('B', ['mean', 'std'])]))
assert_frame_equal(result, expected, check_like=True)
# equivalent of using a selection list / or not
for t in cases:
result = t[['A', 'B']].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
# series like aggs
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t['A'].agg({'A': ['sum', 'std']})
expected = pd.concat([t['A'].sum(),
t['A'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std')])
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([t['A'].agg(['sum', 'std']),
t['A'].agg(['mean', 'std'])],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t['A'].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
# errors
# invalid names in the agg specification
for t in cases:
def f():
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
t[['A']].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
pytest.raises(SpecificationError, f)
def test_agg_nested_dicts(self):
np.random.seed(1234)
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
index.name = 'date'
df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
df_col = df.reset_index()
df_mult = df_col.copy()
df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
names=['index', 'date'])
r = df.resample('2D')
cases = [
r,
df_col.resample('2D', on='date'),
df_mult.resample('2D', level='date'),
df.groupby(pd.Grouper(freq='2D'))
]
for t in cases:
def f():
t.aggregate({'r1': {'A': ['mean', 'sum']},
'r2': {'B': ['mean', 'sum']}})
pytest.raises(ValueError, f)
for t in cases:
expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(),
t['B'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
assert_frame_equal(result, expected, check_like=True)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t.agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
assert_frame_equal(result, expected, check_like=True)
def test_selection_api_validation(self):
# GH 13500
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
df_exp = DataFrame({'a': rng}, index=index)
# non DatetimeIndex
with pytest.raises(TypeError):
df.resample('2D', level='v')
with pytest.raises(ValueError):
df.resample('2D', on='date', level='d')
with pytest.raises(TypeError):
df.resample('2D', on=['a', 'date'])
with pytest.raises(KeyError):
df.resample('2D', level=['a', 'date'])
# upsampling not allowed
with pytest.raises(ValueError):
df.resample('2D', level='d').asfreq()
with pytest.raises(ValueError):
df.resample('2D', on='date').asfreq()
exp = df_exp.resample('2D').sum()
exp.index.name = 'date'
assert_frame_equal(exp, df.resample('2D', on='date').sum())
exp.index.name = 'd'
assert_frame_equal(exp, df.resample('2D', level='d').sum())
class Base(object):
"""
base class for resampling testing, calling
.create_series() generates a series of each index type
"""
def create_index(self, *args, **kwargs):
""" return the _index_factory created using the args, kwargs """
factory = self._index_factory()
return factory(*args, **kwargs)
@pytest.fixture
def _index_start(self):
return datetime(2005, 1, 1)
@pytest.fixture
def _index_end(self):
return datetime(2005, 1, 10)
@pytest.fixture
def _index_freq(self):
return 'D'
@pytest.fixture
def index(self, _index_start, _index_end, _index_freq):
return self.create_index(_index_start, _index_end, freq=_index_freq)
@pytest.fixture
def _series_name(self):
raise AbstractMethodError(self)
@pytest.fixture
def _static_values(self, index):
return np.arange(len(index))
@pytest.fixture
def series(self, index, _series_name, _static_values):
return Series(_static_values, index=index, name=_series_name)
@pytest.fixture
def frame(self, index, _static_values):
return DataFrame({'value': _static_values}, index=index)
@pytest.fixture(params=[Series, DataFrame])
def series_and_frame(self, request, index, _series_name, _static_values):
if request.param == Series:
return Series(_static_values, index=index, name=_series_name)
if request.param == DataFrame:
return DataFrame({'value': _static_values}, index=index)
@pytest.mark.parametrize('freq', ['2D', '1H'])
def test_asfreq(self, series_and_frame, freq):
obj = series_and_frame
result = obj.resample(freq).asfreq()
if freq == '2D':
new_index = obj.index.take(np.arange(0, len(obj.index), 2))
new_index.freq = to_offset('2D')
else:
new_index = self.create_index(obj.index[0], obj.index[-1],
freq=freq)
expected = obj.reindex(new_index)
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self):
# test for fill value during resampling, issue 3715
s = self.create_series()
result = s.resample('1H').asfreq()
new_index = self.create_index(s.index[0], s.index[-1], freq='1H')
expected = s.reindex(new_index)
assert_series_equal(result, expected)
frame = s.to_frame('value')
frame.iloc[1] = None
result = frame.resample('1H').asfreq(fill_value=4.0)
new_index = self.create_index(frame.index[0],
frame.index[-1], freq='1H')
expected = frame.reindex(new_index, fill_value=4.0)
assert_frame_equal(result, expected)
def test_resample_interpolate(self):
# # 12925
df = self.create_series().to_frame('value')
assert_frame_equal(
df.resample('1T').asfreq().interpolate(),
df.resample('1T').interpolate())
def test_raises_on_non_datetimelike_index(self):
# this is a non datetimelike index
xp = DataFrame()
pytest.raises(TypeError, lambda: xp.resample('A').mean())
def test_resample_empty_series(self):
# GH12771 & GH12868
s = self.create_series()[:0]
for freq in ['M', 'D', 'H']:
# need to test for ohlc from GH13083
methods = [method for method in resample_methods
if method != 'ohlc']
for method in methods:
result = getattr(s.resample(freq), method)()
expected = s.copy()
expected.index = s.index._shallow_copy(freq=freq)
assert_index_equal(result.index, expected.index)
assert result.index.freq == expected.index.freq
assert_series_equal(result, expected, check_dtype=False)
def test_resample_empty_dataframe(self):
# GH13212
index = self.create_series().index[:0]
f = DataFrame(index=index)
for freq in ['M', 'D', 'H']:
# count retains dimensions too
methods = downsample_methods + upsample_methods
for method in methods:
result = getattr(f.resample(freq), method)()
if method != 'size':
expected = f.copy()
else:
# GH14962
expected = Series([])
expected.index = f.index._shallow_copy(freq=freq)
assert_index_equal(result.index, expected.index)
assert result.index.freq == expected.index.freq
assert_almost_equal(result, expected, check_dtype=False)
# test size for GH13212 (currently stays as df)
def test_resample_empty_dtypes(self):
# Empty series were sometimes causing a segfault (for the functions
# with Cython bounds-checking disabled) or an IndexError. We just run
# them to ensure they no longer do. (GH #10228)
for index in tm.all_timeseries_index_generator(0):
for dtype in (np.float, np.int, np.object, 'datetime64[ns]'):
for how in downsample_methods + upsample_methods:
empty_series = Series([], index, dtype)
try:
getattr(empty_series.resample('d'), how)()
except DataError:
# Ignore these since some combinations are invalid
# (ex: doing mean with dtype of np.object)
pass
def test_resample_loffset_arg_type(self):
# GH 13218, 15002
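        # loffset only shifts the resulting index labels (here by 2 hours);
        # the bin contents themselves are computed exactly as without it.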
df = self.create_series().to_frame('value')
expected_means = [df.values[i:i + 2].mean()
for i in range(0, len(df.values), 2)]
expected_index = self.create_index(df.index[0],
periods=len(df.index) / 2,
freq='2D')
        # loffset coerces PeriodIndex to DatetimeIndex
if isinstance(expected_index, PeriodIndex):
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({'value': expected_means}, index=expected_index)
for arg in ['mean', {'value': 'mean'}, ['mean']]:
result_agg = df.resample('2D', loffset='2H').agg(arg)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result_how = df.resample('2D', how=arg, loffset='2H')
if isinstance(arg, list):
expected.columns = pd.MultiIndex.from_tuples([('value',
'mean')])
# GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex
if isinstance(expected.index, TimedeltaIndex):
with pytest.raises(AssertionError):
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
else:
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
def test_apply_to_empty_series(self):
# GH 14313
series = self.create_series()[:0]
for freq in ['M', 'D', 'H']:
result = series.resample(freq).apply(lambda x: 1)
expected = series.resample(freq).apply(np.sum)
assert_series_equal(result, expected, check_dtype=False)
class TestDatetimeIndex(Base):
_index_factory = lambda x: date_range
@pytest.fixture
def _series_name(self):
return 'dti'
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def create_series(self):
i = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
return Series(np.arange(len(i)), index=i, name='dti')
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
assert g.ngroups == 2593
assert notna(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
        # GH 2763 - return input dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10),
index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
assert len(r.columns) == 10
assert len(r.index) == 2593
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', closed='right', label='right').mean()
exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=exp_idx)
assert_series_equal(result, expected)
assert result.index.name == 'index'
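        # Rough sketch of the binning above: with closed='right'/label='right'
        # the bins are (23:55, 00:00], (00:00, 00:05], ... so the first bin
        # holds only s[0]; switching to closed='left' below uses
        # [00:00, 00:05), ... while still labelling by the right edge.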
result = s.resample('5min', closed='left', label='right').mean()
exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min',
name='index')
expected = Series([s[:5].mean(), s[5:10].mean(),
s[10:].mean()], index=exp_idx)
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min').last()
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_how(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
args = downsample_methods
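        # _ohlc mirrors what .ohlc() computes per bin:
        # [open, high, low, close] == [first, max, min, last], or a row of
        # NaNs when the whole bin is missing.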
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
inds = date_range('1/1/2000', periods=4, freq='5min', name='index')
for arg in args:
if arg == 'ohlc':
func = _ohlc
else:
func = arg
try:
result = getattr(s.resample(
'5min', closed='right', label='right'), arg)()
expected = s.groupby(grouplist).agg(func)
assert result.index.name == 'index'
if arg == 'ohlc':
expected = DataFrame(expected.values.tolist())
expected.columns = ['open', 'high', 'low', 'close']
expected.index = Index(inds, name='index')
assert_frame_equal(result, expected)
else:
expected.index = inds
assert_series_equal(result, expected)
except BaseException as exc:
exc.args += ('how=%s' % arg,)
raise
def test_numpy_compat(self):
# see gh-12811
s = Series([1, 2, 3, 4, 5], index=date_range(
'20130101', periods=5, freq='s'))
r = s.resample('2s')
msg = "numpy operations are not valid with resample"
for func in ('min', 'max', 'sum', 'prod',
'mean', 'var', 'std'):
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(r, func),
func, 1, 2, 3)
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(r, func), axis=1)
def test_resample_how_callables(self):
# GH 7929
data = np.arange(5, dtype=np.int64)
ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
df = DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class fn_class:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M").apply(fn)
df_lambda = df.resample("M").apply(lambda x: str(type(x)))
df_partial = df.resample("M").apply(partial(fn))
df_partial2 = df.resample("M").apply(partial(fn, a=2))
df_class = df.resample("M").apply(fn_class())
assert_frame_equal(df_standard, df_lambda)
assert_frame_equal(df_standard, df_partial)
assert_frame_equal(df_standard, df_partial2)
assert_frame_equal(df_standard, df_class)
def test_resample_with_timedeltas(self):
expected = DataFrame({'A': np.arange(1480)})
expected = expected.groupby(expected.index // 30).sum()
expected.index = pd.timedelta_range('0 days', freq='30T', periods=50)
df = DataFrame({'A': np.arange(1480)}, index=pd.to_timedelta(
np.arange(1480), unit='T'))
result = df.resample('30T').sum()
assert_frame_equal(result, expected)
s = df['A']
result = s.resample('30T').sum()
assert_series_equal(result, expected['A'])
def test_resample_single_period_timedelta(self):
s = Series(list(range(5)), index=pd.timedelta_range(
'1 day', freq='s', periods=5))
result = s.resample('2s').sum()
expected = Series([1, 5, 4], index=pd.timedelta_range(
'1 day', freq='2s', periods=3))
assert_series_equal(result, expected)
def test_resample_timedelta_idempotency(self):
# GH 12072
index = pd.timedelta_range('0', periods=9, freq='10L')
series = Series(range(9), index=index)
result = series.resample('10L').mean()
expected = series
assert_series_equal(result, expected)
def test_resample_rounding(self):
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
from pandas.compat import StringIO
df = pd.read_csv(StringIO(data), parse_dates={'timestamp': [
'date', 'time']}, index_col='timestamp')
df.index.name = None
result = df.resample('6s').sum()
expected = DataFrame({'value': [
4, 9, 4, 2
]}, index=date_range('2014-11-08', freq='6s', periods=4))
assert_frame_equal(result, expected)
result = df.resample('7s').sum()
expected = DataFrame({'value': [
4, 10, 4, 1
]}, index=date_range('2014-11-08', freq='7s', periods=4))
assert_frame_equal(result, expected)
result = df.resample('11s').sum()
expected = DataFrame({'value': [
11, 8
]}, index=date_range('2014-11-08', freq='11s', periods=2))
assert_frame_equal(result, expected)
result = df.resample('13s').sum()
expected = DataFrame({'value': [
13, 6
]}, index=date_range('2014-11-08', freq='13s', periods=2))
assert_frame_equal(result, expected)
result = df.resample('17s').sum()
expected = DataFrame({'value': [
16, 3
]}, index=date_range('2014-11-08', freq='17s', periods=2))
assert_frame_equal(result, expected)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun').last()
assert len(result) == 3
assert (result.index.dayofweek == [6, 6, 6]).all()
assert result.iloc[0] == s['1/2/2005']
assert result.iloc[1] == s['1/9/2005']
assert result.iloc[2] == s.iloc[-1]
result = s.resample('W-MON').last()
assert len(result) == 2
assert (result.index.dayofweek == [0, 0]).all()
assert result.iloc[0] == s['1/3/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-TUE').last()
assert len(result) == 2
assert (result.index.dayofweek == [1, 1]).all()
assert result.iloc[0] == s['1/4/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-WED').last()
assert len(result) == 2
assert (result.index.dayofweek == [2, 2]).all()
assert result.iloc[0] == s['1/5/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-THU').last()
assert len(result) == 2
assert (result.index.dayofweek == [3, 3]).all()
assert result.iloc[0] == s['1/6/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-FRI').last()
assert len(result) == 2
assert (result.index.dayofweek == [4, 4]).all()
assert result.iloc[0] == s['1/7/2005']
assert result.iloc[1] == s['1/10/2005']
# to biz day
result = s.resample('B').last()
assert len(result) == 7
assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
assert result.iloc[0] == s['1/2/2005']
assert result.iloc[1] == s['1/3/2005']
assert result.iloc[5] == s['1/9/2005']
assert result.index.name == 'index'
def test_resample_upsampling_picked_but_not_correct(self):
# Test for issue #3020
dates = date_range('01-Jan-2014', '05-Jan-2014', freq='D')
series = Series(1, index=dates)
result = series.resample('D').mean()
assert result.index[0] == dates[0]
# GH 5955
        # incorrectly deciding to upsample when the axis frequency matches the
# resample frequency
import datetime
s = Series(np.arange(1., 6), index=[datetime.datetime(
1975, 1, i, 12, 0) for i in range(1, 6)])
expected = Series(np.arange(1., 6), index=date_range(
'19750101', periods=5, freq='D'))
result = s.resample('D').count()
assert_series_equal(result, Series(1, index=expected.index))
result1 = s.resample('D').sum()
result2 = s.resample('D').mean()
assert_series_equal(result1, expected)
assert_series_equal(result2, expected)
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A').mean()
assert_series_equal(result['A'], df['A'].resample('A').mean())
result = df.resample('M').mean()
assert_series_equal(result['A'], df['A'].resample('M').mean())
df.resample('M', kind='period').mean()
df.resample('W-WED', kind='period').mean()
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', closed='right', label='right',
loffset=timedelta(minutes=1)).mean()
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', closed='right', label='right',
loffset='1min').mean()
assert_series_equal(result, expected)
expected = s.resample(
'5min', closed='right', label='right',
loffset=Minute(1)).mean()
assert_series_equal(result, expected)
assert result.index.freq == Minute(5)
# from daily
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun').last()
expected = ser.resample('w-sun', loffset=-bday).last()
assert result.index[0] - bday == expected.index[0]
def test_resample_loffset_count(self):
# GH 12725
start_time = '1/1/2000 00:00:00'
rng = date_range(start_time, periods=100, freq='S')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('10S', loffset='1s').count()
expected_index = (
date_range(start_time, periods=10, freq='10S') +
timedelta(seconds=1)
)
expected = Series(10, index=expected_index)
assert_series_equal(result, expected)
# Same issue should apply to .size() since it goes through
# same code path
result = ts.resample('10S', loffset='1s').size()
assert_series_equal(result, expected)
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min').pad()
assert len(result) == 12961
assert result[0] == s[0]
assert result[-1] == s[-1]
assert result.index.name == 'index'
def test_resample_how_method(self):
# GH9915
s = Series([11, 22],
index=[Timestamp('2015-03-31 21:48:52.672000'),
Timestamp('2015-03-31 21:49:52.739000')])
expected = Series([11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
index=[Timestamp('2015-03-31 21:48:50'),
Timestamp('2015-03-31 21:49:00'),
Timestamp('2015-03-31 21:49:10'),
Timestamp('2015-03-31 21:49:20'),
Timestamp('2015-03-31 21:49:30'),
Timestamp('2015-03-31 21:49:40'),
Timestamp('2015-03-31 21:49:50')])
assert_series_equal(s.resample("10S").mean(), expected)
def test_resample_extra_index_point(self):
# GH 9756
index = DatetimeIndex(start='20150101', end='20150331', freq='BM')
expected = DataFrame({'A': Series([21, 41, 63], index=index)})
index = DatetimeIndex(start='20150101', end='20150331', freq='B')
df = DataFrame(
{'A': Series(range(len(index)), index=index)}, dtype='int64')
result = df.resample('BM').last()
assert_frame_equal(result, expected)
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').ffill(limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_nearest_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').nearest(limit=2)
expected = ts.reindex(result.index, method='nearest', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min').ohlc()
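        # Each 5-minute bin's open/high/low/close should come from the
        # first/max/min/last observation falling inside that bin; the checks
        # below spot-check the first and second-to-last bins.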
assert len(result) == len(expect)
assert len(result.columns) == 4
xs = result.iloc[-2]
assert xs['open'] == s[-6]
assert xs['high'] == s[-6:-1].max()
assert xs['low'] == s[-6:-1].min()
assert xs['close'] == s[-2]
xs = result.iloc[0]
assert xs['open'] == s[0]
assert xs['high'] == s[:5].max()
assert xs['low'] == s[:5].min()
assert xs['close'] == s[4]
def test_resample_ohlc_result(self):
# GH 12332
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
s = Series(range(len(index)), index=index)
a = s.loc[:'4-15-2000'].resample('30T').ohlc()
assert isinstance(a, DataFrame)
b = s.loc[:'4-14-2000'].resample('30T').ohlc()
assert isinstance(b, DataFrame)
# GH12348
# raising on odd period
rng = date_range('2013-12-30', '2014-01-07')
index = rng.drop([Timestamp('2014-01-01'),
Timestamp('2013-12-31'),
Timestamp('2014-01-04'),
Timestamp('2014-01-05')])
df = DataFrame(data=np.arange(len(index)), index=index)
result = df.resample('B').mean()
expected = df.reindex(index=date_range(rng[0], rng[-1], freq='B'))
assert_frame_equal(result, expected)
def test_resample_ohlc_dataframe(self):
df = (
DataFrame({
'PRICE': {
Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {
Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H').ohlc()
exp = pd.concat([df['VOLUME'].resample('H').ohlc(),
df['PRICE'].resample('H').ohlc()],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H').ohlc()
exp.columns = pd.MultiIndex.from_tuples([
('a', 'c', 'open'), ('a', 'c', 'high'), ('a', 'c', 'low'),
('a', 'c', 'close'), ('b', 'd', 'open'), ('b', 'd', 'high'),
('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4, 12), index=[2000, 2000, 2000, 2000],
columns=[Period(year=2000, month=i + 1, freq='M')
for i in range(12)])
df.iloc[3, :] = np.nan
result = df.resample('Q', axis=1).mean()
expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
expected.columns = [
Period(year=2000, quarter=i + 1, freq='Q') for i in range(4)]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right').mean()
result = bs.resample('8H').mean()
assert len(result) == 22
assert isinstance(result.index.freq, offsets.DateOffset)
assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period').mean()
expected = ts.resample('A-DEC').mean()
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period').mean()
expected = ts.resample('A-JUN').mean()
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', closed='right',
label='right').ohlc()
assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all()
exp = _ohlc(ts[1:31])
assert (resampled.loc['1/1/2000 00:05'] == exp).all()
exp = _ohlc(ts['1/1/2000 5:55:01':])
assert (resampled.loc['1/1/2000 6:00:00'] == exp).all()
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M').mean()
expected = ts.groupby(lambda x: x.month).mean()
assert len(result) == 2
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
pytest.raises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1).mean()
expected = df.T.resample('M').mean().T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
with catch_warnings(record=True):
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1).mean()
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M').mean())
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2).mean()
expected = p_apply(panel2,
lambda x: x.resample('M', axis=1).mean())
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
with catch_warnings(record=True):
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1).apply(lambda x: x.mean(1))
expected = panel.resample('M', axis=1).mean()
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', axis=2).apply(lambda x: x.mean(2))
expected = panel.resample('M', axis=2).mean()
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the
# middle of a desired interval
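        # For example, a series starting at 04:00:02 resampled to '5t' should
        # produce bins anchored at 04:00, 04:05, ... rather than bins starting
        # at the first timestamp itself.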
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left').mean()
expected = ts.resample(freq, closed='left', label='left').mean()
assert_series_equal(result, expected)
def test_resample_single_group(self):
mysum = lambda x: x.sum()
rng = date_range('2000-1-1', '2000-2-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M').sum(),
ts.resample('M').apply(mysum))
rng = date_range('2000-1-1', '2000-1-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M').sum(),
ts.resample('M').apply(mysum))
# GH 3849
s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
Timestamp('20070915 15:40:00')])
expected = Series([0.75], index=[Timestamp('20070915')])
result = s.resample('D').apply(lambda x: np.std(x))
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
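        # base=2 shifts the bin origin by 2 minutes, so the 5-minute bins are
        # anchored at ...:57, :02, :07, ... instead of :00, :05, :10, ...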
resampled = ts.resample('5min', base=2).mean()
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_base_with_timedeltaindex(self):
# GH 10530
rng = timedelta_range(start='0s', periods=25, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
with_base = ts.resample('2s', base=5).mean()
without_base = ts.resample('2s').mean()
exp_without_base = timedelta_range(start='0s', end='25s', freq='2s')
exp_with_base = timedelta_range(start='5s', end='29s', freq='2s')
tm.assert_index_equal(without_base.index, exp_without_base)
tm.assert_index_equal(with_base.index, exp_with_base)
def test_resample_categorical_data_with_timedeltaindex(self):
# GH #12169
df = DataFrame({'Group_obj': 'A'},
index=pd.to_timedelta(list(range(20)), unit='s'))
df['Group'] = df['Group_obj'].astype('category')
result = df.resample('10s').agg(lambda x: (x.value_counts().index[0]))
expected = DataFrame({'Group_obj': ['A', 'A'],
'Group': ['A', 'A']},
index=pd.to_timedelta([0, 10], unit='s'))
expected = expected.reindex(['Group_obj', 'Group'], axis=1)
tm.assert_frame_equal(result, expected)
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left').mean()
expected = ts.resample('D', closed='left', label='left').mean()
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period').mean()
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg(self):
# aggregate a period resampler with a lambda
s2 = Series(np.random.randint(0, 5, 50),
index=pd.period_range('2012-01-01', freq='H', periods=50),
dtype='float64')
expected = s2.to_timestamp().resample('D').mean().to_period()
result = s2.resample('D').agg(lambda x: x.mean())
assert_series_equal(result, expected)
def test_resample_segfault(self):
# GH 8573
# segfaulting in older versions
all_wins_and_wagers = [
(1, datetime(2013, 10, 1, 16, 20), 1, 0),
(2, datetime(2013, 10, 1, 16, 10), 1, 0),
(2, datetime(2013, 10, 1, 18, 15), 1, 0),
(2, datetime(2013, 10, 1, 16, 10, 31), 1, 0)]
df = DataFrame.from_records(all_wins_and_wagers,
columns=("ID", "timestamp", "A", "B")
).set_index("timestamp")
result = df.groupby("ID").resample("5min").sum()
expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
assert_frame_equal(result, expected)
def test_resample_dtype_preservation(self):
# GH 12202
# validation tests for dtype preservation
df = DataFrame({'date': pd.date_range(start='2016-01-01',
periods=4, freq='W'),
'group': [1, 1, 2, 2],
'val': Series([5, 6, 7, 8],
dtype='int32')}
).set_index('date')
result = df.resample('1D').ffill()
assert result.val.dtype == np.int32
result = df.groupby('group').resample('1D').ffill()
assert result.val.dtype == np.int32
    def test_resample_dtype_coercion(self):
pytest.importorskip('scipy.interpolate')
# GH 16361
df = {"a": [1, 3, 1, 4]}
df = DataFrame(df, index=pd.date_range("2017-01-01", "2017-01-04"))
expected = (df.astype("float64")
.resample("H")
.mean()
["a"]
.interpolate("cubic")
)
result = df.resample("H")["a"].mean().interpolate("cubic")
tm.assert_series_equal(result, expected)
result = df.resample("H").mean()["a"].interpolate("cubic")
tm.assert_series_equal(result, expected)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W').mean()
expected = ts.resample('W-SUN').mean()
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('M')
def test_nanosecond_resample_error(self):
        # GH 12307 - Values fall after last bin when
        # resampling using pd.tseries.offsets.Nano as period
start = 1443707890427
exp_start = 1443707890400
indx = pd.date_range(
start=pd.to_datetime(start),
periods=10,
freq='100n'
)
ts = Series(range(len(indx)), index=indx)
r = ts.resample(pd.tseries.offsets.Nano(100))
result = r.agg('mean')
exp_indx = pd.date_range(
start=pd.to_datetime(exp_start),
periods=10,
freq='100n'
)
exp = Series(range(len(exp_indx)), index=exp_indx)
assert_series_equal(result, exp)
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M').mean()
expected = df.resample(
'M', kind='period').mean().to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left').mean()
exp = df.tshift(1, freq='D').resample('M', kind='period').mean()
exp = exp.to_timestamp(how='end')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q').mean()
expected = df.resample(
'Q', kind='period').mean().to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left').mean()
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left').mean()
expected = expected.to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
resampled = ts.resample('M').mean()
assert len(resampled) == 1
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
ts.resample(freq).mean()
def test_resample_anchored_multiday(self):
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pandas-dev/pandas/issues/8683
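        # The bins here are anchored relative to the start date (2014-10-14),
        # as described above; the assertions below check the final bin label
        # for the default output and for label='right', which differ by
        # exactly one 2200ms bin width.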
index = pd.date_range(
'2014-10-14 23:06:23.206', periods=3, freq='400L'
) | pd.date_range(
'2014-10-15 23:00:00', periods=2, freq='2200L')
s = Series(np.random.randn(5), index=index)
# Ensure left closing works
result = s.resample('2200L').mean()
assert result.index[-1] == Timestamp('2014-10-15 23:00:02.000')
# Ensure right closing works
result = s.resample('2200L', label='right').mean()
assert result.index[-1] == Timestamp('2014-10-15 23:00:04.200')
def test_corner_cases(self):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left').mean()
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
tm.assert_index_equal(result.index, ex_index)
len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC').mean()
assert len(result) == 0
# resample to periods
ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period').mean()
assert len(result) == 1
assert result.index[0] == Period('2000-04', freq='M')
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d').mean()
def test_upsample_apply_functions(self):
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min').aggregate(['mean', 'sum'])
assert isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D').sum()
exp = ts.sort_index().resample('D').sum()
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
for dtype in ['int64', 'int32', 'float64', 'float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype=dtype)
result = df.resample("T").apply(lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T").median()
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
ts = _simple_ts('1/1/2000', '4/1/2000')
result = ts.resample('M').apply(lambda x: x.mean())
exp = ts.resample('M').mean()
tm.assert_series_equal(result, exp)
foo_exp = ts.resample('M').mean()
foo_exp.name = 'foo'
bar_exp = ts.resample('M').std()
bar_exp.name = 'bar'
result = ts.resample('M').apply(
[lambda x: x.mean(), lambda x: x.std(ddof=1)])
result.columns = ['foo', 'bar']
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
# this is a MI Series, so comparing the names of the results
# doesn't make sense
result = ts.resample('M').aggregate({'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
tm.assert_series_equal(result['foo'], foo_exp, check_names=False)
tm.assert_series_equal(result['bar'], bar_exp, check_names=False)
def test_resample_unequal_times(self):
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({'close': 1}, index=bad_ind)
# it works!
df.resample('AS').sum()
def test_resample_consistency(self):
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = pd.date_range('2002-02-02', periods=4, freq='30T')
s = Series(np.arange(4.), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
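        # With originals every 30 minutes and a 10-minute target grid there
        # are exactly two synthetic points between consecutive observations,
        # so bfill with limit=2 and an unlimited bfill give the same result
        # here, and all four constructions below should agree.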
i10 = pd.date_range(i30[0], i30[-1], freq='10T')
s10 = s.reindex(index=i10, method='bfill')
s10_2 = s.reindex(index=i10, method='bfill', limit=2)
rl = s.reindex_like(s10, method='bfill', limit=2)
r10_2 = s.resample('10Min').bfill(limit=2)
r10 = s.resample('10Min').bfill()
# s10_2, r10, r10_2, rl should all be equal
assert_series_equal(s10_2, r10)
assert_series_equal(s10_2, r10_2)
assert_series_equal(s10_2, rl)
def test_resample_timegrouper(self):
# GH 7227
dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3),
datetime(2014, 11, 5), datetime(2014, 9, 5),
datetime(2014, 10, 8), datetime(2014, 7, 15)]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
result = df.set_index('A').resample('M').count()
exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31',
'2014-09-30',
'2014-10-31', '2014-11-30'],
freq='M', name='A')
expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(
len(dates))))
result = df.set_index('A').resample('M').count()
expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
index=exp_idx, columns=['B', 'C'])
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
def test_resample_nunique(self):
# GH 12352
df = DataFrame({
'ID': {Timestamp('2015-06-05 00:00:00'): '0010100903',
Timestamp('2015-06-08 00:00:00'): '0010150847'},
'DATE': {Timestamp('2015-06-05 00:00:00'): '2015-06-05',
Timestamp('2015-06-08 00:00:00'): '2015-06-08'}})
r = df.resample('D')
g = df.groupby(pd.Grouper(freq='D'))
expected = df.groupby(pd.Grouper(freq='D')).ID.apply(lambda x:
x.nunique())
assert expected.name == 'ID'
for t in [r, g]:
            result = t.ID.nunique()
assert_series_equal(result, expected)
result = df.ID.resample('D').nunique()
assert_series_equal(result, expected)
result = df.ID.groupby(pd.Grouper(freq='D')).nunique()
assert_series_equal(result, expected)
def test_resample_nunique_with_date_gap(self):
# GH 13453
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index2 = pd.date_range('4-15-2000', '5-15-2000', freq='h')
index3 = index.append(index2)
s = Series(range(len(index3)), index=index3, dtype='int64')
r = s.resample('M')
# Since all elements are unique, these should all be the same
results = [
r.count(),
r.nunique(),
r.agg(Series.nunique),
r.agg('nunique')
]
assert_series_equal(results[0], results[1])
assert_series_equal(results[0], results[2])
assert_series_equal(results[0], results[3])
def test_resample_group_info(self): # GH10914
for n, k in product((10000, 100000), (10, 100, 1000)):
dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
ts = Series(np.random.randint(0, n // k, n).astype('int64'),
index=np.random.choice(dr, n))
left = ts.resample('30T').nunique()
ix = date_range(start=ts.index.min(), end=ts.index.max(),
freq='30T')
vals = ts.values
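            # Manual nunique-per-bin: assign each timestamp to its 30T bin
            # (1-based, via searchsorted), sort by (bin, value), keep only the
            # first occurrence of each (bin, value) pair, then count the
            # survivors per bin with bincount.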
bins = np.searchsorted(ix.values, ts.index, side='right')
sorter = np.lexsort((vals, bins))
vals, bins = vals[sorter], bins[sorter]
mask = np.r_[True, vals[1:] != vals[:-1]]
mask |= np.r_[True, bins[1:] != bins[:-1]]
arr = np.bincount(bins[mask] - 1,
minlength=len(ix)).astype('int64', copy=False)
right = Series(arr, index=ix)
assert_series_equal(left, right)
def test_resample_size(self):
n = 10000
dr = date_range('2015-09-19', periods=n, freq='T')
ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
left = ts.resample('7T').size()
ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T')
bins = np.searchsorted(ix.values, ts.index.values, side='right')
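        # searchsorted with side='right' yields 1-based bin positions relative
        # to ix, so bincount is given minlength=len(ix) + 1 and the leading
        # slot (timestamps before the first bin edge) is dropped with [1:].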
val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64',
copy=False)
right = Series(val, index=ix)
assert_series_equal(left, right)
def test_resample_across_dst(self):
# The test resamples a DatetimeIndex with values before and after a
# DST change
# Issue: 14682
# The DatetimeIndex we will start with
# (note that DST happens at 03:00+02:00 -> 02:00+01:00)
# 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
df1 = DataFrame([1477786980, 1477790580], columns=['ts'])
dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s')
.dt.tz_localize('UTC')
.dt.tz_convert('Europe/Madrid'))
# The expected DatetimeIndex after resampling.
# 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
df2 = DataFrame([1477785600, 1477789200], columns=['ts'])
dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s')
.dt.tz_localize('UTC')
.dt.tz_convert('Europe/Madrid'))
df = DataFrame([5, 5], index=dti1)
result = df.resample(rule='H').sum()
expected = DataFrame([5, 5], index=dti2)
assert_frame_equal(result, expected)
def test_resample_dst_anchor(self):
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
df = DataFrame([5], index=dti)
assert_frame_equal(df.resample(rule='D').sum(),
DataFrame([5], index=df.index.normalize()))
df.resample(rule='MS').sum()
assert_frame_equal(
df.resample(rule='MS').sum(),
DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)],
tz='US/Eastern')))
dti = date_range('2013-09-30', '2013-11-02', freq='30Min',
tz='Europe/Paris')
values = range(dti.size)
df = DataFrame({"a": values,
"b": values,
"c": values}, index=dti, dtype='int64')
how = {"a": "min", "b": "max", "c": "count"}
assert_frame_equal(
df.resample("W-MON").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 384, 720, 1056, 1394],
"b": [47, 383, 719, 1055, 1393, 1586],
"c": [48, 336, 336, 336, 338, 193]},
index=date_range('9/30/2013', '11/4/2013',
freq='W-MON', tz='Europe/Paris')),
'W-MON Frequency')
assert_frame_equal(
df.resample("2W-MON").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 720, 1394],
"b": [47, 719, 1393, 1586],
"c": [48, 672, 674, 193]},
index=date_range('9/30/2013', '11/11/2013',
freq='2W-MON', tz='Europe/Paris')),
'2W-MON Frequency')
assert_frame_equal(
df.resample("MS").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 1538],
"b": [47, 1537, 1586],
"c": [48, 1490, 49]},
index=date_range('9/1/2013', '11/1/2013',
freq='MS', tz='Europe/Paris')),
'MS Frequency')
assert_frame_equal(
df.resample("2MS").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 1538],
"b": [1537, 1586],
"c": [1538, 49]},
index=date_range('9/1/2013', '11/1/2013',
freq='2MS', tz='Europe/Paris')),
'2MS Frequency')
df_daily = df['10/26/2013':'10/29/2013']
assert_frame_equal(
df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})
[["a", "b", "c"]],
DataFrame({"a": [1248, 1296, 1346, 1394],
"b": [1295, 1345, 1393, 1441],
"c": [48, 50, 48, 48]},
index=date_range('10/26/2013', '10/29/2013',
freq='D', tz='Europe/Paris')),
'D Frequency')
def test_resample_with_nat(self):
# GH 13020
index = DatetimeIndex([pd.NaT,
'1970-01-01 00:00:00',
pd.NaT,
'1970-01-01 00:00:01',
'1970-01-01 00:00:02'])
frame = DataFrame([2, 3, 5, 7, 11], index=index)
index_1s = DatetimeIndex(['1970-01-01 00:00:00',
'1970-01-01 00:00:01',
'1970-01-01 00:00:02'])
frame_1s = DataFrame([3, 7, 11], index=index_1s)
assert_frame_equal(frame.resample('1s').mean(), frame_1s)
index_2s = DatetimeIndex(['1970-01-01 00:00:00',
'1970-01-01 00:00:02'])
frame_2s = DataFrame([5, 11], index=index_2s)
assert_frame_equal(frame.resample('2s').mean(), frame_2s)
index_3s = DatetimeIndex(['1970-01-01 00:00:00'])
frame_3s = DataFrame([7], index=index_3s)
assert_frame_equal(frame.resample('3s').mean(), frame_3s)
assert_frame_equal(frame.resample('60s').mean(), frame_3s)
def test_resample_timedelta_values(self):
# GH 13119
# check that timedelta dtype is preserved when NaT values are
# introduced by the resampling
times = timedelta_range('1 day', '4 day', freq='4D')
df = DataFrame({'time': times}, index=times)
times2 = timedelta_range('1 day', '4 day', freq='2D')
exp = Series(times2, index=times2, name='time')
exp.iloc[1] = pd.NaT
res = df.resample('2D').first()['time']
tm.assert_series_equal(res, exp)
res = df['time'].resample('2D').first()
tm.assert_series_equal(res, exp)
def test_resample_datetime_values(self):
# GH 13119
# check that datetime dtype is preserved when NaT values are
# introduced by the resampling
dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
df = DataFrame({'timestamp': dates}, index=dates)
exp = Series([datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
index=date_range('2016-01-15', periods=3, freq='2D'),
name='timestamp')
res = df.resample('2D').first()['timestamp']
tm.assert_series_equal(res, exp)
res = df['timestamp'].resample('2D').first()
tm.assert_series_equal(res, exp)
class TestPeriodIndex(Base):
_index_factory = lambda x: period_range
@pytest.fixture
def _series_name(self):
return 'pi'
def create_series(self):
# TODO: replace calls to .create_series() by injecting the series
# fixture
i = period_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
return Series(np.arange(len(i)), index=i, name='pi')
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
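        # The non-timestamp branch builds the expected result by converting to
        # timestamps, reindexing over the full span at the target frequency,
        # and converting back to periods.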
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + 1).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self):
# test for fill value during resampling, issue 3715
s = self.create_series()
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
def test_selection(self, index, freq, kind):
        # This is a bug; these should be implemented
# GH 14008
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
with pytest.raises(NotImplementedError):
df.resample(freq, on='date', kind=kind)
with pytest.raises(NotImplementedError):
df.resample(freq, level='d', kind=kind)
def test_annual_upsample_D_s_f(self):
self._check_annual_upsample_cases('D', 'start', 'ffill')
def test_annual_upsample_D_e_f(self):
self._check_annual_upsample_cases('D', 'end', 'ffill')
def test_annual_upsample_D_s_b(self):
self._check_annual_upsample_cases('D', 'start', 'bfill')
def test_annual_upsample_D_e_b(self):
self._check_annual_upsample_cases('D', 'end', 'bfill')
def test_annual_upsample_B_s_f(self):
self._check_annual_upsample_cases('B', 'start', 'ffill')
def test_annual_upsample_B_e_f(self):
self._check_annual_upsample_cases('B', 'end', 'ffill')
def test_annual_upsample_B_s_b(self):
self._check_annual_upsample_cases('B', 'start', 'bfill')
def test_annual_upsample_B_e_b(self):
self._check_annual_upsample_cases('B', 'end', 'bfill')
def test_annual_upsample_M_s_f(self):
self._check_annual_upsample_cases('M', 'start', 'ffill')
def test_annual_upsample_M_e_f(self):
self._check_annual_upsample_cases('M', 'end', 'ffill')
def test_annual_upsample_M_s_b(self):
self._check_annual_upsample_cases('M', 'start', 'bfill')
def test_annual_upsample_M_e_b(self):
self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
for month in MONTHS:
ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
def test_not_subperiod(self):
# These are incompatible period rules for resampling
ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
pytest.raises(ValueError, lambda: ts.resample('a-dec').mean())
pytest.raises(ValueError, lambda: ts.resample('q-mar').mean())
pytest.raises(ValueError, lambda: ts.resample('M').mean())
pytest.raises(ValueError, lambda: ts.resample('w-thu').mean())
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', convention='end').ffill(limit=2)
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D').ffill()
exp = df['a'].resample('D').ffill()
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M').ffill()
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
def test_quarterly_upsample(self):
targets = ['D', 'B', 'M']
for month in MONTHS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, convention=conv).ffill()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_monthly_upsample(self):
targets = ['D', 'B']
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, convention=conv).ffill()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_basic(self):
# GH3609
s = Series(range(100), index=date_range(
'20130101', freq='s', periods=100, name='idx'), dtype='float')
s[10:30] = np.nan
index = PeriodIndex([
Period('2013-01-01 00:00', 'T'),
Period('2013-01-01 00:01', 'T')], name='idx')
expected = Series([34.5, 79.5], index=index)
result = s.to_period().resample('T', kind='period').mean()
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period').mean()
assert_series_equal(result2, expected)
@pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
('2M', [31 + 29, 31 + 9])])
def test_resample_count(self, freq, expected_vals):
# GH12774
series = Series(1, index=pd.period_range(start='2000', periods=100))
result = series.resample(freq).count()
expected_index = pd.period_range(start='2000', freq=freq,
periods=len(expected_vals))
expected = Series(expected_vals, index=expected_index)
assert_series_equal(result, expected)
def test_resample_same_freq(self):
# GH12770
series = Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M'))
expected = series
for method in resample_methods:
result = getattr(series.resample('M'), method)()
assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
with pytest.raises(IncompatibleFrequency):
Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
# see gh-5430
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
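        # e.g. 2013-11-01 00:00 UTC is 2013-10-31 17:00 in Los Angeles
        # (UTC-7 before the Nov 3 DST change), hence the ``- 1`` below.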
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1)
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_with_local_timezone_dateutil(self):
# see gh-5430
local_timezone = 'dateutil/America/Los_Angeles'
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
index = pd.date_range(start, end, freq='H', name='idx')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D',
name='idx') - 1)
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
# GH2073
s = Series(np.arange(9, dtype='int64'),
index=date_range('2010-01-01', periods=9, freq='Q'))
last = s.resample('M').ffill()
both = s.resample('M').ffill().resample('M').last().astype('int64')
assert_series_equal(last, both)
def test_weekly_upsample(self):
targets = ['D', 'B']
for day in DAYS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, convention=conv).ffill()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp').mean()
expected = ts.to_timestamp(how='end').resample('A-DEC').mean()
assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
for month in MONTHS:
ts = _simple_pts('1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month).ffill()
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = _simple_pts('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how).ffill()
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR').ffill()
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = Series(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A').ffill()
expected = stamps.resample('A').ffill().to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = Series(np.random.randn(5), index=rng)
pytest.raises(Exception, lambda: s.resample('A').ffill())
@pytest.mark.parametrize('freq', ['5min'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_resample_5minute(self, freq, kind):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
expected = ts.to_timestamp().resample(freq).mean()
if kind != 'timestamp':
expected = expected.to_period(freq)
result = ts.resample(freq, kind=kind).mean()
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D').asfreq()
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = _simple_pts('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s').asfreq()
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min').apply(len)
expected = s.resample('10min').apply(len).loc[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU').asfreq()
assert result.isna().all()
result = ts.resample('W-THU').asfreq().ffill()[:-1]
expected = ts.asfreq('W-THU').ffill()
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W').mean()
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample(
'W').mean().tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D').mean()
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right').mean()
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period').mean()
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# GH 6397
# comparing an offset that doesn't propagate tz's
rng = date_range('1/1/2011', periods=20000, freq='H')
rng = rng.tz_localize('EST')
ts = DataFrame(index=rng)
ts['first'] = np.random.randn(len(rng))
ts['second'] = np.cumsum(np.random.randn(len(rng)))
expected = DataFrame(
{
'first': ts.resample('A').sum()['first'],
'second': ts.resample('A').mean()['second']},
columns=['first', 'second'])
result = ts.resample(
'A').agg({'first': np.sum,
'second': np.mean}).reindex(columns=['first', 'second'])
assert_frame_equal(result, expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', closed='left', label='right').mean()
exp = s[1:].resample('10min', closed='left', label='right').mean()
assert_series_equal(result, exp)
result = s.resample('10min', closed='left', label='left').mean()
exp = s[1:].resample('10min', closed='left', label='left').mean()
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
tm.assert_index_equal(result.index, ex_index)
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A').mean()
exp = ts.to_timestamp().resample('A').mean().to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', closed='left', label='left').first()
def test_resample_with_dst_time_change(self):
# GH 15549
index = pd.DatetimeIndex([1457537600000000000, 1458059600000000000],
tz='UTC').tz_convert('America/Chicago')
df = pd.DataFrame([1, 2], index=index)
result = df.resample('12h', closed='right',
label='right').last().ffill()
expected_index_values = ['2016-03-09 12:00:00-06:00',
'2016-03-10 00:00:00-06:00',
'2016-03-10 12:00:00-06:00',
'2016-03-11 00:00:00-06:00',
'2016-03-11 12:00:00-06:00',
'2016-03-12 00:00:00-06:00',
'2016-03-12 12:00:00-06:00',
'2016-03-13 00:00:00-06:00',
'2016-03-13 13:00:00-05:00',
'2016-03-14 01:00:00-05:00',
'2016-03-14 13:00:00-05:00',
'2016-03-15 01:00:00-05:00',
'2016-03-15 13:00:00-05:00']
index = pd.DatetimeIndex(expected_index_values,
tz='UTC').tz_convert('America/Chicago')
expected = pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 2.0], index=index)
assert_frame_equal(result, expected)
def test_resample_bms_2752(self):
# GH2753
foo = Series(index=pd.bdate_range('20000101', '20000201'))
res1 = foo.resample("BMS").mean()
res2 = foo.resample("BMS").mean().resample("B").mean()
assert res1.index[0] == Timestamp('20000103')
assert res1.index[0] == res2.index[0]
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span').mean()
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
end_freq = ['D', 'Q', 'M', 'D']
end_types = ['M', 'A', 'Q', 'W']
for from_freq, to_freq in zip(end_freq, end_types):
idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq).mean()
assert_frame_equal(resampled, df.resample(to_freq, closed='right',
label='right').mean())
def test_default_left_closed_label(self):
others = ['MS', 'AS', 'QS', 'D', 'H']
others_freq = ['D', 'Q', 'M', 'H', 'T']
for from_freq, to_freq in zip(others_freq, others):
idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq).mean()
assert_frame_equal(resampled, df.resample(to_freq, closed='left',
label='left').mean())
def test_all_values_single_bin(self):
# 2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
s = Series(np.random.randn(len(index)), index=index)
result = s.resample("A").mean()
tm.assert_almost_equal(result[0], s.mean())
def test_evenly_divisible_with_no_extra_bins(self):
# 4076
        # when the frequency is evenly divisible, sometimes extra bins are created
df = DataFrame(np.random.randn(9, 3),
index=date_range('2000-1-1', periods=9))
result = df.resample('5D').mean()
expected = pd.concat(
[df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T
expected.index = [Timestamp('2000-1-1'), Timestamp('2000-1-6')]
assert_frame_equal(result, expected)
index = date_range(start='2001-5-4', periods=28)
df = DataFrame(
[{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90,
'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
[{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
index=index.append(index)).sort_index()
index = date_range('2001-5-4', periods=4, freq='7D')
expected = DataFrame(
[{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14,
'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4,
index=index)
result = df.resample('7D').count()
assert_frame_equal(result, expected)
expected = DataFrame(
[{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4,
index=index)
result = df.resample('7D').sum()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
@pytest.mark.parametrize('agg_arg', ['mean', {'value': 'mean'}, ['mean']])
def test_loffset_returns_datetimeindex(self, frame, kind, agg_arg):
# make sure passing loffset returns DatetimeIndex in all cases
# basic method taken from Base.test_resample_loffset_arg_type()
df = frame
expected_means = [df.values[i:i + 2].mean()
for i in range(0, len(df.values), 2)]
expected_index = self.create_index(df.index[0],
periods=len(df.index) / 2,
freq='2D')
# loffset coerces PeriodIndex to DateTimeIndex
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({'value': expected_means}, index=expected_index)
result_agg = df.resample('2D', loffset='2H', kind=kind).agg(agg_arg)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result_how = df.resample('2D', how=agg_arg, loffset='2H',
kind=kind)
if isinstance(agg_arg, list):
expected.columns = pd.MultiIndex.from_tuples([('value', 'mean')])
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
@pytest.mark.parametrize('freq, period_mult', [('H', 24), ('12H', 2)])
@pytest.mark.parametrize('kind', [None, 'period'])
def test_upsampling_ohlc(self, freq, period_mult, kind):
# GH 13083
pi = PeriodIndex(start='2000', freq='D', periods=10)
s = Series(range(len(pi)), index=pi)
expected = s.to_timestamp().resample(freq).ohlc().to_period(freq)
# timestamp-based resampling doesn't include all sub-periods
# of the last original period, so extend accordingly:
new_index = PeriodIndex(start='2000', freq=freq,
periods=period_mult * len(pi))
expected = expected.reindex(new_index)
result = s.resample(freq, kind=kind).ohlc()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('periods, values',
[([pd.NaT, '1970-01-01 00:00:00', pd.NaT,
'1970-01-01 00:00:02', '1970-01-01 00:00:03'],
[2, 3, 5, 7, 11]),
([pd.NaT, pd.NaT, '1970-01-01 00:00:00', pd.NaT,
pd.NaT, pd.NaT, '1970-01-01 00:00:02',
'1970-01-01 00:00:03', pd.NaT, pd.NaT],
[1, 2, 3, 5, 6, 8, 7, 11, 12, 13])])
@pytest.mark.parametrize('freq, expected_values',
[('1s', [3, np.NaN, 7, 11]),
('2s', [3, int((7 + 11) / 2)]),
('3s', [int((3 + 7) / 2), 11])])
def test_resample_with_nat(self, periods, values, freq, expected_values):
# GH 13224
index = PeriodIndex(periods, freq='S')
frame = DataFrame(values, index=index)
expected_index = period_range('1970-01-01 00:00:00',
periods=len(expected_values), freq=freq)
expected = DataFrame(expected_values, index=expected_index)
result = frame.resample(freq).mean()
assert_frame_equal(result, expected)
def test_resample_with_only_nat(self):
# GH 13224
pi = PeriodIndex([pd.NaT] * 3, freq='S')
frame = DataFrame([2, 3, 5], index=pi)
expected_index = PeriodIndex(data=[], freq=pi.freq)
expected = DataFrame([], index=expected_index)
result = frame.resample('1s').mean()
assert_frame_equal(result, expected)
class TestTimedeltaIndex(Base):
_index_factory = lambda x: timedelta_range
@pytest.fixture
def _index_start(self):
return '1 day'
@pytest.fixture
def _index_end(self):
return '10 day'
@pytest.fixture
def _series_name(self):
return 'tdi'
def create_series(self):
i = timedelta_range('1 day',
'10 day', freq='D')
return Series(np.arange(len(i)), index=i, name='tdi')
def test_asfreq_bug(self):
import datetime as dt
df = DataFrame(data=[1, 3],
index=[dt.timedelta(), dt.timedelta(minutes=3)])
result = df.resample('1T').asfreq()
expected = DataFrame(data=[1, np.nan, np.nan, 3],
index=timedelta_range('0 day',
periods=4,
freq='1T'))
assert_frame_equal(result, expected)
class TestResamplerGrouper(object):
def setup_method(self, method):
self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.arange(40)},
index=date_range('1/1/2000',
freq='s',
periods=40))
def test_back_compat_v180(self):
df = self.frame
for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = df.groupby('A').resample('4s', how=how)
expected = getattr(df.groupby('A').resample('4s'), how)()
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = df.groupby('A').resample('4s', how='mean',
fill_method='ffill')
expected = df.groupby('A').resample('4s').mean().ffill()
assert_frame_equal(result, expected)
def test_tab_complete_ipython6_warning(self, ip):
from IPython.core.completer import provisionalcompleter
code = dedent("""\
import pandas.util.testing as tm
s = tm.makeTimeSeries()
rs = s.resample("D")
""")
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('rs.', 1))
def test_deferred_with_groupby(self):
# GH 12486
# support deferred resample ops with groupby
data = [['2010-01-01', 'A', 2], ['2010-01-02', 'A', 3],
['2010-01-05', 'A', 8], ['2010-01-10', 'A', 7],
['2010-01-13', 'A', 3], ['2010-01-01', 'B', 5],
['2010-01-03', 'B', 2], ['2010-01-04', 'B', 1],
['2010-01-11', 'B', 7], ['2010-01-14', 'B', 3]]
df = DataFrame(data, columns=['date', 'id', 'score'])
df.date = pd.to_datetime(df.date)
f = lambda x: x.set_index('date').resample('D').asfreq()
expected = df.groupby('id').apply(f)
result = df.set_index('date').groupby('id').resample('D').asfreq()
assert_frame_equal(result, expected)
df = DataFrame({'date': pd.date_range(start='2016-01-01',
periods=4,
freq='W'),
'group': [1, 1, 2, 2],
'val': [5, 6, 7, 8]}).set_index('date')
f = lambda x: x.resample('1D').ffill()
expected = df.groupby('group').apply(f)
result = df.groupby('group').resample('1D').ffill()
assert_frame_equal(result, expected)
def test_getitem(self):
g = self.frame.groupby('A')
expected = g.B.apply(lambda x: x.resample('2s').mean())
result = g.resample('2s').B.mean()
assert_series_equal(result, expected)
result = g.B.resample('2s').mean()
assert_series_equal(result, expected)
result = g.resample('2s').mean().B
assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
# multiple calls after selection causing an issue with aliasing
data = [{'id': 1, 'buyer': 'A'}, {'id': 2, 'buyer': 'B'}]
df = DataFrame(data, index=pd.date_range('2016-01-01', periods=2))
r = df.groupby('id').resample('1D')
result = r['buyer'].count()
expected = Series([1, 1],
index=pd.MultiIndex.from_tuples(
[(1, Timestamp('2016-01-01')),
(2, Timestamp('2016-01-02'))],
names=['id', None]),
name='buyer')
assert_series_equal(result, expected)
result = r['buyer'].count()
assert_series_equal(result, expected)
def test_nearest(self):
# GH 17496
# Resample nearest
index = pd.date_range('1/1/2000', periods=3, freq='T')
result = Series(range(3), index=index).resample('20s').nearest()
expected = Series(
[0, 0, 1, 1, 1, 2, 2],
index=pd.DatetimeIndex(
['2000-01-01 00:00:00', '2000-01-01 00:00:20',
'2000-01-01 00:00:40', '2000-01-01 00:01:00',
'2000-01-01 00:01:20', '2000-01-01 00:01:40',
'2000-01-01 00:02:00'],
dtype='datetime64[ns]',
freq='20S'))
assert_series_equal(result, expected)
def test_methods(self):
g = self.frame.groupby('A')
r = g.resample('2s')
for f in ['first', 'last', 'median', 'sem', 'sum', 'mean',
'min', 'max']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
for f in ['size']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_series_equal(result, expected)
for f in ['count']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
# series only
for f in ['nunique']:
result = getattr(r.B, f)()
expected = g.B.apply(lambda x: getattr(x.resample('2s'), f)())
assert_series_equal(result, expected)
for f in ['nearest', 'backfill', 'ffill', 'asfreq']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
result = r.ohlc()
expected = g.apply(lambda x: x.resample('2s').ohlc())
assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.resample('2s'), f)(ddof=1))
assert_frame_equal(result, expected)
def test_apply(self):
g = self.frame.groupby('A')
r = g.resample('2s')
# reduction
expected = g.resample('2s').sum()
def f(x):
return x.resample('2s').sum()
result = r.apply(f)
assert_frame_equal(result, expected)
def f(x):
return x.resample('2s').apply(lambda y: y.sum())
result = g.apply(f)
assert_frame_equal(result, expected)
def test_apply_with_mutated_index(self):
# GH 15169
index = pd.date_range('1-1-2015', '12-31-15', freq='D')
df = DataFrame(data={'col1': np.random.rand(len(index))}, index=index)
def f(x):
s = Series([1, 2], index=['a', 'b'])
return s
expected = df.groupby(pd.Grouper(freq='M')).apply(f)
result = df.resample('M').apply(f)
assert_frame_equal(result, expected)
# A case for series
expected = df['col1'].groupby(pd.Grouper(freq='M')).apply(f)
result = df['col1'].resample('M').apply(f)
assert_series_equal(result, expected)
def test_resample_groupby_with_label(self):
# GH 13235
index = date_range('2000-01-01', freq='2D', periods=5)
df = DataFrame(index=index,
data={'col0': [0, 0, 1, 1, 2], 'col1': [1, 1, 1, 1, 1]}
)
result = df.groupby('col0').resample('1W', label='left').sum()
mi = [np.array([0, 0, 1, 2]),
pd.to_datetime(np.array(['1999-12-26', '2000-01-02',
'2000-01-02', '2000-01-02'])
)
]
mindex = pd.MultiIndex.from_arrays(mi, names=['col0', None])
expected = DataFrame(data={'col0': [0, 0, 2, 2], 'col1': [1, 1, 2, 1]},
index=mindex
)
assert_frame_equal(result, expected)
def test_consistency_with_window(self):
# consistent return values with window
df = self.frame
expected = pd.Int64Index([1, 2, 3], name='A')
result = df.groupby('A').resample('2s').mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
result = df.groupby('A').rolling(20).mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
def test_median_duplicate_columns(self):
# GH 14233
df = DataFrame(np.random.randn(20, 3),
columns=list('aaa'),
index=pd.date_range('2012-01-01', periods=20, freq='s'))
df2 = df.copy()
df2.columns = ['a', 'b', 'c']
expected = df2.resample('5s').median()
result = df.resample('5s').median()
expected.columns = result.columns
assert_frame_equal(result, expected)
class TestTimeGrouper(object):
def setup_method(self, method):
self.ts = Series(np.random.randn(1000),
index=date_range('1/1/2000', periods=1000))
def test_apply(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
grouped = self.ts.groupby(grouper)
f = lambda x: x.sort_values()[-3:]
applied = grouped.apply(f)
expected = self.ts.groupby(lambda x: x.year).apply(f)
applied.index = applied.index.droplevel(0)
expected.index = expected.index.droplevel(0)
assert_series_equal(applied, expected)
def test_count(self):
self.ts[::3] = np.nan
expected = self.ts.groupby(lambda x: x.year).count()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
result = self.ts.groupby(grouper).count()
expected.index = result.index
assert_series_equal(result, expected)
result = self.ts.resample('A').count()
expected.index = result.index
assert_series_equal(result, expected)
def test_numpy_reduction(self):
result = self.ts.resample('A', closed='right').prod()
expected = self.ts.groupby(lambda x: x.year).agg(np.prod)
expected.index = result.index
assert_series_equal(result, expected)
def test_apply_iteration(self):
# #2300
N = 1000
ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
df = DataFrame({'open': 1, 'close': 2}, index=ind)
tg = TimeGrouper('M')
_, grouper, _ = tg._get_grouper(df)
# Errors
grouped = df.groupby(grouper, group_keys=False)
f = lambda df: df['close'] / df['open']
# it works!
result = grouped.apply(f)
tm.assert_index_equal(result.index, df.index)
def test_panel_aggregation(self):
ind = pd.date_range('1/1/2000', periods=100)
data = np.random.randn(2, len(ind), 4)
with catch_warnings(record=True):
wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind,
minor_axis=['A', 'B', 'C', 'D'])
tg = TimeGrouper('M', axis=1)
_, grouper, _ = tg._get_grouper(wp)
bingrouped = wp.groupby(grouper)
binagg = bingrouped.mean()
def f(x):
assert (isinstance(x, Panel))
return x.mean(1)
result = bingrouped.agg(f)
tm.assert_panel_equal(result, binagg)
def test_fails_on_no_datetime_index(self):
index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex')
index_funcs = (tm.makeIntIndex,
tm.makeUnicodeIndex, tm.makeFloatIndex,
lambda m: tm.makeCustomIndex(m, 2))
n = 2
for name, func in zip(index_names, index_funcs):
index = func(n)
df = DataFrame({'a': np.random.randn(n)}, index=index)
with tm.assert_raises_regex(TypeError,
"Only valid with "
"DatetimeIndex, TimedeltaIndex "
"or PeriodIndex, but got an "
"instance of %r" % name):
df.groupby(TimeGrouper('D'))
def test_aaa_group_order(self):
# GH 12840
        # check TimeGrouper performs stable sorts
n = 20
data = np.random.randn(n, 4)
df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
datetime(2013, 1, 3), datetime(2013, 1, 4),
datetime(2013, 1, 5)] * 4
grouped = df.groupby(TimeGrouper(key='key', freq='D'))
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)),
df[::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 2)),
df[1::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 3)),
df[2::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 4)),
df[3::5])
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)),
df[4::5])
def test_aggregate_normal(self):
        # check that TimeGrouper's aggregation is identical to a normal groupby
n = 20
data = np.random.randn(n, 4)
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
normal_df['key'] = [1, 2, 3, 4, 5] * 4
dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
datetime(2013, 1, 3), datetime(2013, 1, 4),
datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
for func in ['min', 'max', 'prod', 'var', 'std', 'mean']:
expected = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
assert_frame_equal(expected, dt_result)
for func in ['count', 'sum']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
# GH 7453
for func in ['size']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_series_equal(expected, dt_result)
# GH 7453
for func in ['first', 'last']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
        # if TimeGrouper is used, 'nth' doesn't work yet
"""
for func in ['nth']:
expected = getattr(normal_grouped, func)(3)
expected.index = date_range(start='2013-01-01',
freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)(3)
assert_frame_equal(expected, dt_result)
"""
def test_aggregate_with_nat(self):
        # check that TimeGrouper's aggregation is identical to a normal groupby
n = 20
data = np.random.randn(n, 4).astype('int64')
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
normal_df['key'] = [1, 2, np.nan, 4, 5] * 4
dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
for func in ['min', 'max', 'sum', 'prod']:
normal_result = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3],
columns=['A', 'B', 'C', 'D'])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
assert_frame_equal(expected, dt_result)
for func in ['count']:
normal_result = getattr(normal_grouped, func)()
pad = DataFrame([[0, 0, 0, 0]], index=[3],
columns=['A', 'B', 'C', 'D'])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
for func in ['size']:
normal_result = getattr(normal_grouped, func)()
pad = Series([0], index=[3])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(start='2013-01-01', freq='D',
periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_series_equal(expected, dt_result)
# GH 9925
assert dt_result.index.name == 'key'
        # if NaT is included, 'var', 'std', 'mean', 'first', 'last'
        # and 'nth' don't work yet
def test_repr(self):
# GH18203
result = repr(TimeGrouper(key='A', freq='H'))
expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
"closed='left', label='left', how='mean', "
"convention='e', base=0)")
assert result == expected
| bsd-3-clause |
mjudsp/Tsallis | sklearn/manifold/tests/test_locally_linear.py | 27 | 5247 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when an invalid parameter is passed to LLE
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
lenovor/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
With these data, AIC also selects the right model (not shown, to save time),
but BIC is better suited when the goal is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
Here, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
xwolf12/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
wateraccounting/wa | Collect/CFSR/DataAccess_CFSR.py | 1 | 8868 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: [email protected]
Repository: https://github.com/wateraccounting/wa
Module: Collect/CFSR
"""
# General modules
import pandas as pd
import os
import numpy as np
from netCDF4 import Dataset
import re
from joblib import Parallel, delayed
# WA+ modules
from wa.Collect.CFSR.Download_data_CFSR import Download_data
from wa.General import data_conversions as DC
def CollectData(Dir, Var, Startdate, Enddate, latlim, lonlim, Waitbar, cores, Version):
"""
This function collects daily CFSR data in geotiff format
Keyword arguments:
Dir -- 'C:/file/to/path/'
Var -- 'dlwsfc','dswsfc','ulwsfc', or 'uswsfc'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -50 and 50)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
Waitbar -- 1 (Default) will print a wait bar
cores -- The number of cores used to run the routine.
It can be 'False' to avoid using parallel computing
routines.
Version -- 1 or 2 (1 = CFSR, 2 = CFSRv2)
"""
    # Creates an array of the days for which the data is collected
Dates = pd.date_range(Startdate,Enddate,freq = 'D')
# Create Waitbar
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# For collecting CFSR data
if Version == 1:
        # Check the latitude and longitude and otherwise clamp lat or lon to the greatest extent
if latlim[0] < -89.9171038899 or latlim[1] > 89.9171038899:
print 'Latitude above 89.917N or below 89.917S is not possible. Value set to maximum'
latlim[0] = np.maximum(latlim[0],-89.9171038899)
latlim[1] = np.minimum(latlim[1],89.9171038899)
if lonlim[0] < -180 or lonlim[1] > 179.843249782:
print 'Longitude must be between 179.84E and 179.84W. Now value is set to maximum'
lonlim[0] = np.maximum(lonlim[0],-180)
lonlim[1] = np.minimum(lonlim[1],179.843249782)
# Make directory for the CFSR data
output_folder=os.path.join(Dir,'Radiation','CFSR')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# For collecting CFSRv2 data
if Version == 2:
        # Check the latitude and longitude and otherwise clamp lat or lon to the greatest extent
if latlim[0] < -89.9462116040955806 or latlim[1] > 89.9462116040955806:
            print 'Latitude above 89.946N or below 89.946S is not possible. Value set to maximum'
latlim[0] = np.maximum(latlim[0],-89.9462116040955806)
latlim[1] = np.minimum(latlim[1],89.9462116040955806)
if lonlim[0] < -180 or lonlim[1] > 179.8977275:
print 'Longitude must be between 179.90E and 179.90W. Now value is set to maximum'
lonlim[0] = np.maximum(lonlim[0],-180)
lonlim[1] = np.minimum(lonlim[1],179.8977275)
# Make directory for the CFSRv2 data
output_folder=os.path.join(Dir,'Radiation','CFSRv2')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Pass variables to parallel function and run
args = [output_folder, latlim, lonlim, Var, Version]
if not cores:
for Date in Dates:
RetrieveData(Date, args)
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
results = True
else:
results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)
for Date in Dates)
# Remove all .nc and .grb2 files
for f in os.listdir(output_folder):
if re.search(".nc", f):
os.remove(os.path.join(output_folder, f))
for f in os.listdir(output_folder):
if re.search(".grb2", f):
os.remove(os.path.join(output_folder, f))
for f in os.listdir(output_folder):
if re.search(".grib2", f):
os.remove(os.path.join(output_folder, f))
return results
def RetrieveData(Date, args):
# unpack the arguments
[output_folder, latlim, lonlim, Var, Version] = args
# Name of the model
if Version == 1:
version_name = 'CFSR'
if Version == 2:
version_name = 'CFSRv2'
    # Name of the output file
if Var == 'dlwsfc':
Outputname = 'DLWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'dswsfc':
Outputname = 'DSWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'ulwsfc':
Outputname = 'ULWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'uswsfc':
Outputname = 'USWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
    # Create the full path of the output file
outputnamePath = os.path.join(output_folder, Outputname)
    # If the output file does not exist yet, create it
if not os.path.exists(outputnamePath):
local_filename = Download_data(Date, Version, output_folder, Var)
# convert grb2 to netcdf (wgrib2 module is needed)
for i in range(0,4):
nameNC = 'Output' + str(Date.strftime('%Y')) + str(Date.strftime('%m')) + str(Date.strftime('%d')) + '-' + str(i+1) + '.nc'
# Total path of the output
FileNC6hour = os.path.join(output_folder, nameNC)
            # Band number of the grib data which is converted to .nc
band=(int(Date.strftime('%d')) - 1) * 28 + (i + 1) * 7
# Convert the data
DC.Convert_grb2_to_nc(local_filename, FileNC6hour, band)
if Version == 1:
if Date < pd.Timestamp(pd.datetime(2011, 01, 01)):
                # Convert the latlim and lonlim into array indices
Xstart = np.floor((lonlim[0] + 180.1562497) / 0.3125)
Xend = np.ceil((lonlim[1] + 180.1562497) / 0.3125) + 1
Ystart = np.floor((latlim[0] + 89.9171038899) / 0.3122121663)
Yend = np.ceil((latlim[1] + 89.9171038899) / 0.3122121663)
# Create a new dataset
Datatot = np.zeros([576, 1152])
else:
Version = 2
if Version == 2:
            # Convert the latlim and lonlim into array indices
Xstart = np.floor((lonlim[0] + 180.102272725) / 0.204545)
Xend = np.ceil((lonlim[1] + 180.102272725) / 0.204545) + 1
Ystart = np.floor((latlim[0] + 89.9462116040955806) / 0.204423)
Yend = np.ceil((latlim[1] + 89.9462116040955806) / 0.204423)
# Create a new dataset
Datatot = np.zeros([880, 1760])
        # Open the four 6-hourly datasets
for i in range (0, 4):
nameNC = 'Output' + str(Date.strftime('%Y')) + str(Date.strftime('%m')) + str(Date.strftime('%d')) + '-' + str(i + 1) + '.nc'
FileNC6hour = os.path.join(output_folder, nameNC)
f = Dataset(FileNC6hour, mode = 'r')
Data = f.variables['Band1'][0:int(Datatot.shape[0]), 0:int(Datatot.shape[1])]
f.close()
data = np.array(Data)
Datatot = Datatot + data
# Calculate the average in W/m^2 over the day
DatatotDay = Datatot / 4
DatatotDayEnd = np.zeros([int(Datatot.shape[0]), int(Datatot.shape[1])])
DatatotDayEnd[:,0:int(Datatot.shape[0])] = DatatotDay[:, int(Datatot.shape[0]):int(Datatot.shape[1])]
DatatotDayEnd[:,int(Datatot.shape[0]):int(Datatot.shape[1])] = DatatotDay[:, 0:int(Datatot.shape[0])]
        # clip the data to the extent defined by the user
DatasetEnd = DatatotDayEnd[int(Ystart):int(Yend), int(Xstart):int(Xend)]
# save file
if Version == 1:
pixel_size = 0.3125
if Version == 2:
pixel_size = 0.204545
geo = [lonlim[0],pixel_size,0,latlim[1],0,-pixel_size]
DC.Save_as_tiff(data = np.flipud(DatasetEnd), name = outputnamePath, geo = geo, projection = "WGS84")
return()
| apache-2.0 |
renyi533/tensorflow | tensorflow/python/kernel_tests/constant_op_eager_test.py | 33 | 21448 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import compat
# TODO(josh11b): add tests with lists/tuples, Shape.
# TODO(ashankar): Collapse with tests in constant_op_test.py and use something
# like the test_util.run_in_graph_and_eager_modes decorator to confirm
# equivalence between graph and eager execution.
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with context.device("/device:CPU:0"):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
device = test_util.gpu_device_name()
if device:
np_ans = np.array(x)
with context.device(device):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
orig = [-1.0, 2.0, 0.0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints
orig = [-1.5, 2, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints that don't fit in int32
orig = [1, 2**42, 0.5]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig, dtypes_lib.float64)
self.assertEqual(dtypes_lib.float64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# This integer is not exactly representable as a double, gets rounded.
tf_ans = constant_op.constant(2**54 + 1, dtypes_lib.float64)
self.assertEqual(2**54, tf_ans.numpy())
# This integer is larger than all non-infinite numbers representable
# by a double, raises an exception.
with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
constant_op.constant(10**310, dtypes_lib.float64)
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
self._testAll([-1, 2])
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
# Should detect out of range for int32 and use int64 instead.
orig = [2, 2**48, -2**48]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.int64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Out of range for an int64
with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
constant_op.constant([2**72])
def testComplex64(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5
]).astype(np.complex128))
self._testAll(
np.complex(1, 2) * np.random.normal(size=30).reshape(
[2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
def testString(self):
val = [compat.as_bytes(str(x)) for x in np.arange(-15, 15)]
self._testCpu(np.array(val).reshape([2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
val = ops.convert_to_tensor(b"\0\0\0\0").numpy()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
val = ops.convert_to_tensor(b"xx\0xx").numpy()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
val = ops.convert_to_tensor(nested).numpy()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeFill(self):
c = constant_op.constant(12, shape=[7])
self.assertEqual(c.get_shape(), [7])
self.assertAllEqual([12, 12, 12, 12, 12, 12, 12], c.numpy())
def testExplicitShapeReshape(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[5, 2, 3])
self.assertEqual(c.get_shape(), [5, 2, 3])
def testImplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeTooBig(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testShapeTooSmall(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShapeWrong(self):
with self.assertRaisesRegexp(TypeError, None):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShape(self):
self._testAll(constant_op.constant([1]).get_shape())
def testDimension(self):
x = constant_op.constant([1]).shape[0]
self._testAll(x)
def testDimensionList(self):
x = [constant_op.constant([1]).shape[0]]
self._testAll(x)
# Mixing with regular integers is fine too
self._testAll([1] + x)
self._testAll(x + [1])
def testDimensionTuple(self):
x = constant_op.constant([1]).shape[0]
self._testAll((x,))
self._testAll((1, x))
self._testAll((x, 1))
def testInvalidLength(self):
class BadList(list):
def __init__(self):
super(BadList, self).__init__([1, 2, 3]) # pylint: disable=invalid-length-returned
def __len__(self):
return -1
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant([BadList()])
with self.assertRaisesRegexp(ValueError, "mixed types"):
constant_op.constant([1, 2, BadList()])
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant(BadList())
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant([[BadList(), 2], 3])
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant([BadList(), [1, 2, 3]])
with self.assertRaisesRegexp(ValueError, "should return >= 0"):
constant_op.constant([BadList(), []])
# TODO(allenl, josh11b): These cases should return exceptions rather than
# working (currently shape checking only checks the first element of each
# sequence recursively). Maybe the first one is fine, but the second one
# silently truncating is rather bad.
# with self.assertRaisesRegexp(ValueError, "should return >= 0"):
# constant_op.constant([[3, 2, 1], BadList()])
# with self.assertRaisesRegexp(ValueError, "should return >= 0"):
# constant_op.constant([[], BadList()])
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError, "non-rectangular Python sequence"):
constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegexp(ValueError, None):
constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegexp(ValueError, None):
constant_op.constant([[1, 2], [3], [4, 5]])
# TODO(ashankar): This test fails with graph construction since
# tensor_util.make_tensor_proto (invoked from constant_op.constant)
# does not handle iterables (it relies on numpy conversion).
# For consistency, should graph construction handle Python objects
# that implement the sequence protocol (but not numpy conversion),
# or should eager execution fail on such sequences?
def testCustomSequence(self):
# This is inspired by how many objects in pandas are implemented:
# - They implement the Python sequence protocol
# - But may raise a KeyError on __getitem__(self, 0)
# See https://github.com/tensorflow/tensorflow/issues/20347
class MySeq(object):
def __getitem__(self, key):
if key != 1 and key != 3:
raise KeyError(key)
return key
def __len__(self):
return 2
def __iter__(self):
l = list([1, 3])
return l.__iter__()
self.assertAllEqual([1, 3], self.evaluate(constant_op.constant(MySeq())))
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.EagerTensor))
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
# np.object (and can't be changed without breaking a lot things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
z_value = z_var.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=False)
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.bool, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=True)
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast between
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).numpy()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(test.TestCase):
def _Ones(self, shape):
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
numpy_dtype = dtype.as_numpy_dtype
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.numpy()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
ctx = context.context()
device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
with ops.device(device):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.numpy()
self.assertAllClose(np_ans, out)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillComplex128(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill(shape, 7)
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([3, 2], [1.0, 2.0])
if __name__ == "__main__":
test.main()
| apache-2.0 |
andela-mfalade/python-pandas-csv-records-analysis | scripts/processor.py | 1 | 5798 | """Initiate file analysis.
This module is used to find the discrepancies between two given files.
"""
import argparse
import csv
import logging
import pandas as pd
logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.DEBUG)
matching_records_path = 'matching_records.csv'
non_matching_records_path = 'non_matching_records.csv'
records_diff = "customers_in_chartio_but_not_in_responsys.csv"
no_project_key_path = 'records_with_no_project_key.csv'
no_customer_key_path = 'records_with_no_customer_key.csv'
no_project_status_path = 'records_with_no_project_status.csv'
CHARTIO_GRADES = ['exceeded', 'failed', 'passed', 'ungradeable']
def get_file_df(file_path):
"""Read the file from file path then create a Pandas data frame from file.
It eventually extracts the needed keys from this df and stored it in a dict
Args:
file_path(str): This is the path to the csv file to be read
Returns:
target_dict(dict): This holds key value pairs for future comparison
"""
logger.info("Reading CSV file.")
contacts_df = pd.read_csv(file_path)
target_dict = dict()
for contact_info in contacts_df.itertuples():
# Each unique_key is a concatenation of the contact_info's account_key
# and the project_id.
_, contact_id, project_id = contact_info
unique_key = "{x}-{y}".format(x=contact_id, y=project_id)
target_dict.update({unique_key: contact_info})
return target_dict
def write_to_csv(file_path, content):
"""Write content to file.
This simple method writes the given content to the file in the specified
file path.
It creates a new file at the path if no file exists there,
and appends to the file on each call.
Args:
file_path(str): Path to file
content(list): A list of records which represent each row of the file
TODO: Speed up the whole process by keeping each output file open for the
duration of the run; opening and closing a file on every call slows down
the write process.
"""
with open(file_path, 'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(content)
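# A possible follow-up to the TODO above -- a minimal sketch, not part of the
# original module: keep one csv writer per output path open for the whole run
# instead of reopening the file on every call. The names below
# (_open_writers, write_to_csv_cached, close_writers) are introduced here for
# illustration only.
_open_writers = {}


def write_to_csv_cached(file_path, content):
    """Append ``content`` as a row, reusing an already-open writer if any."""
    if file_path not in _open_writers:
        f = open(file_path, 'a', newline='')
        _open_writers[file_path] = (f, csv.writer(f))
    _open_writers[file_path][1].writerow(content)


def close_writers():
    """Flush and close every cached file handle at the end of the run."""
    for f, _writer in _open_writers.values():
        f.close()
    _open_writers.clear()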
def get_unique_key(project_status, project_id, customer_id):
"""Return unique key from given record.
Returns:
unique_key(str): This is the unique key which is used to search
the dict which holds the overall records
"""
project_key = project_id.replace('.0', '')
customer_key = customer_id.replace('.0', '')
record = [project_status, project_key, customer_key]
invalid_result = project_status not in CHARTIO_GRADES
invalid_project_key = project_key == 'nan'
invalid_customer_key = customer_key == 'nan'
if invalid_result or invalid_project_key or invalid_customer_key:
if invalid_result and project_status == 'nan':
record[0] = None
write_to_csv(no_project_status_path, record)
return False
if project_key == 'nan':
record[1] = None
write_to_csv(no_project_key_path, record)
return False
elif customer_key == 'nan':
record[2] = None
write_to_csv(no_customer_key_path, record)
return False
else:
unique_key = "{x}-{y}".format(x=customer_key, y=project_key)
return unique_key
def translate_result(student_grade):
"""Interprete what a student_grade in one file means in another.
Args:
student_grade(str): a string which represents the grade of the student
in one file
Returns:
Student grade equivalent in another file
"""
thesaurus = {
'ungradeable': ['INCOMPLETE', 'UNGRADED', 'SUBMITTED'],
'failed': ['INCOMPLETE'],
'passed': ['PASSED'],
'exceeded': ['DISTINCTION']
}
return thesaurus[student_grade]
def check_status(unique_key, project_status, keys_dict):
"""Compare two status against each other.
Compares two statuses against each other and calls the appropriate function
"""
result_list = translate_result(project_status)
try:
unique_record = keys_dict[unique_key]
project_result = unique_record[3]
if project_result in result_list:
record = list(unique_record)[1:4]
record.append(project_status)
write_to_csv(matching_records_path, record)
else:
record = list(unique_record)[1:4]
record.append(project_status)
write_to_csv(non_matching_records_path, record)
except (KeyError, ValueError, TypeError):
account_project_keys = unique_key.split('-')
record = [
account_project_keys[0],
account_project_keys[1],
project_status
]
write_to_csv(records_diff, record)
def compare_keys_with_files(file_path, keys_dict):
"""Go through a file and extract and processes its contents."""
contacts_df = pd.read_csv(file_path)
for contact_info in contacts_df.itertuples():
index, project_status, project_key, customer_key = contact_info
unique_key = get_unique_key(
str(project_status),
str(project_key),
str(customer_key)
)
if unique_key:
check_status(unique_key, str(project_status), keys_dict)
def main():
"""Run all scripts from here.
This is the master script that initiates all the other scripts.
"""
parser = argparse.ArgumentParser()
parser.add_argument('path1', help="Path to first CSV file.")
parser.add_argument('path2', help="Path to second CSV file.")
args = parser.parse_args()
account_project_keys = get_file_df(args.path1)
compare_keys_with_files(args.path2, account_project_keys)
if __name__ == '__main__':
main()
| mit |
yyjiang/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
MatthieuBizien/scikit-learn | sklearn/manifold/setup.py | 24 | 1279 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
pllim/ginga | ginga/rv/plugins/Preferences.py | 1 | 63607 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
Make changes to channel settings graphically in the UI.
**Plugin Type: Local**
``Preferences`` is a local plugin, which means it is associated with a
channel. An instance can be opened for each channel.
**Usage**
The ``Preferences`` plugin sets the preferences on a per-channel basis.
The preferences for a given channel are inherited from the "Image"
channel until they are explicitly set and saved using this plugin.
If "Save Settings" is pressed, it will save the settings to the user's
home Ginga folder so that when a channel with the same name is created
in future Ginga sessions it will obtain the same settings.
**Color Distribution Preferences**
.. figure:: figures/cdist-prefs.png
:align: center
:alt: Color Distribution preferences
"Color Distribution" preferences.
The "Color Distribution" preferences control the preferences used for the
data value to color index conversion that occurs after cut levels are
applied and just before final color mapping is performed. It concerns
how the values between the low and high cut levels are distributed to
the color and intensity mapping phase.
The "Algorithm" control is used to set the algorithm used for the
mapping. Click the control to show the list, or simply scroll the mouse
wheel while hovering the cursor over the control. There are eight
algorithms available: linear, log, power, sqrt, squared, asinh, sinh,
and histeq. The name of each algorithm is indicative of how
the data is mapped to the colors in the color map. "linear" is the
default.
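The general idea can be sketched in a few lines (this is an illustration
only, not the actual implementation found in ``ginga.ColorDist``): clip the
data to the cut levels, normalize, apply the chosen curve, and quantize to a
color index::

    import numpy as np

    def log_dist(data, loval, hival, ncolors=256):
        # clip to the cut levels and normalize to [0, 1]
        norm = (np.clip(data, loval, hival) - loval) / max(hival - loval, 1e-12)
        # logarithmic curve, then quantize to a color index
        curved = np.log1p(1000.0 * norm) / np.log(1001.0)
        return (curved * (ncolors - 1)).astype(np.uint8)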
**Color Mapping Preferences**
.. figure:: figures/cmap-prefs.png
:align: center
:alt: Color Mapping preferences
"Color Mapping" preferences.
The "Color Mapping" preferences control the preferences used for the
color map and intensity map, used during the final phase of the color
mapping process. Together with the "Color Distribution" preferences, these
control the mapping of data values into a 24-bpp RGB visual representation.
The "Colormap" control selects which color map should be loaded and
used. Click the control to show the list, or simply scroll the mouse
wheel while hovering the cursor over the control.
The "Intensity" control selects which intensity map should be used
with the color map. The intensity map is applied just before the color
map, and can be used to change the standard linear scale of values into
an inverted scale, logarithmic, etc.
Ginga comes with a good selection of color maps, but should you want
more, you can add custom ones or, if ``matplotlib`` is installed, you
can load all the ones that it has.
See "Customizing Ginga" for details.
**Zoom Preferences**
.. figure:: figures/zoom-prefs.png
:align: center
:alt: Zoom preferences
"Zoom" preferences.
The "Zoom" preferences control Ginga's zooming/scaling behavior.
Ginga supports two zoom algorithms, chosen using the "Zoom Alg" control:
* The "step" algorithm zooms the image inwards in discrete
steps of 1X, 2X, 3X, etc. or outwards in steps of 1/2X, 1/3X, 1/4X,
etc. This algorithm results in the least artifacts visually, but is a
bit slower to zoom over wide ranges when using a scrolling motion
because more "throw" is required to achieve a large zoom change
(this is not the case if one uses of the shortcut zoom keys, such as
the digit keys).
* The "rate" algorithm zooms the image by advancing the scaling at
a rate defined by the value in the "Zoom Rate" box. This rate defaults
to the square root of 2. Larger numbers cause larger changes in scale
between zoom levels. If you like to zoom your images rapidly, at a
small cost in image quality, you would likely want to choose this
option.
Note that regardless of which method is chosen for the zoom algorithm,
the zoom can be controlled by holding down ``Ctrl`` (coarse) or ``Shift``
(fine) while scrolling to constrain the zoom rate (assuming the default
mouse bindings).
The "Stretch XY" control can be used to stretch one of the axes (X or
Y) relative to the other. Select an axis with this control and roll the
scroll wheel while hovering over the "Stretch Factor" control to
stretch the pixels in the selected axis.
The "Scale X" and "Scale Y" controls offer direct access to the
underlying scaling, bypassing the discrete zoom steps. Here, exact
values can be typed to scale the image. Conversely, you will see these
values change as the image is zoomed.
The "Scale Min" and "Scale Max" controls can be used to place a
limit on how much the image can be scaled.
The "Zoom Defaults" button will restore the controls to the Ginga
default values.
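For the "rate" algorithm described above, the scale at a given zoom level
grows geometrically with the rate; a small sketch (not Ginga's internal
code)::

    import math

    zoom_rate = math.sqrt(2.0)      # the default rate

    def scale_for_level(level, rate=zoom_rate):
        # positive levels zoom in, negative levels zoom out
        return rate ** level

    # e.g. 6 steps inward with the default rate gives an 8x scale
    assert abs(scale_for_level(6) - 8.0) < 1e-9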
**Pan Preferences**
.. figure:: figures/pan-prefs.png
:align: center
:alt: Pan Preferences
"Pan" preferences.
The "Pan" preferences control Ginga's panning behavior.
The "Pan X" and "Pan Y" controls offer direct access to set the pan
position in the image (the part of the image located at the center of
the window) -- you can see them change as you pan around the image.
The "Center Image" button sets the pan position to the center of the
image, as calculated by halving the dimensions in X and Y.
The "Mark Center" check box, when checked, will cause Ginga to draw a
small reticle in the center of the image. This is useful for knowing
the pan position and for debugging.
**Transform Preferences**
.. figure:: figures/transform-prefs.png
:align: center
:alt: Transform Preferences
"Transform" preferences.
The "Transform" preferences provide for transforming the view of the image
by flipping the view in X or Y, swapping the X and Y axes, or rotating
the image in arbitrary amounts.
The "Flip X" and "Flip Y" checkboxes cause the image view to be
flipped in the corresponding axis.
The "Swap XY" checkbox causes the image view to be altered by swapping
the X and Y axes. This can be combined with "Flip X" and "Flip Y" to rotate
the image in 90 degree increments. These views will render more quickly
than arbitrary rotations using the "Rotate" control.
The "Rotate" control will rotate the image view the specified amount.
The value should be specified in degrees. "Rotate" can be specified in
conjunction with flipping and swapping.
The "Restore" button will restore the view to the default view, which
is unflipped, unswapped, and unrotated.
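In array terms, the equivalence between a swap plus a flip and a 90-degree
rotation can be checked with a tiny numpy experiment (a sketch only,
independent of Ginga's ``trcalc`` helpers)::

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    # swapping the two axes (transpose) and then flipping the row axis
    # is the same as rotating the array 90 degrees counter-clockwise
    assert np.array_equal(np.flipud(a.T), np.rot90(a, 1))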
**Auto Cuts Preferences**
.. figure:: figures/autocuts-prefs.png
:align: center
:alt: Auto Cuts Preferences
"Auto Cuts" preferences.
The "Auto Cuts" preferences control the calculation of cut levels for
the view when the auto cut levels button or key is pressed, or when
loading a new image with auto cuts enabled. You can also set the cut
levels manually from here.
The "Cut Low" and "Cut High" fields can be used to manually specify lower
and upper cut levels. Pressing "Cut Levels" will set the levels to these
values manually. If a value is missing, it is assumed to default to
whatever the current value is.
Pressing "Auto Levels" will calculate the levels according to an algorithm.
The "Auto Method" control is used to choose which auto cuts algorithm
used: "minmax" (minimum maximum values), "median" (based on median
filtering), "histogram" (based on an image histogram), "stddev" (based on
the standard deviation of pixel values), or "zscale" (based on the ZSCALE
algorithm popularized by IRAF).
As the algorithm is changed, the boxes under it may also change to
allow changes to parameters particular to each algorithm.
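As a rough illustration of two of the simpler methods (a sketch only, not
the actual ``ginga.AutoCuts`` code; the standard-deviation factors below are
made up for the example)::

    import numpy as np

    def minmax_cuts(data):
        # use the full data range as the cut levels
        return float(np.nanmin(data)), float(np.nanmax(data))

    def stddev_cuts(data, sigma_lo=2.0, sigma_hi=2.0):
        # place the cut levels a chosen number of standard deviations
        # around the mean; the factors here are illustrative only
        mean, std = float(np.nanmean(data)), float(np.nanstd(data))
        return mean - sigma_lo * std, mean + sigma_hi * std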
**WCS Preferences**
.. figure:: figures/wcs-prefs.png
:align: center
:alt: WCS Preferences
"WCS" preferences.
The "WCS" preferences control the display preferences for the World
Coordinate System (WCS) calculations used to report the cursor position in the
image.
The "WCS Coords" control is used to select the coordinate system in
which to display the result.
The "WCS Display" control is used to select a sexagesimal (``H:M:S``)
readout or a decimal degrees readout.
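The two formats differ only in a degrees-to-sexagesimal conversion; a
minimal sketch for a right ascension value (Ginga itself uses helpers in
``ginga.util.wcs`` for this)::

    def ra_deg_to_hms(ra_deg):
        # 360 degrees == 24 hours of right ascension
        hours = ra_deg / 15.0
        h = int(hours)
        m = int((hours - h) * 60)
        s = (hours - h - m / 60.0) * 3600.0
        return '%02d:%02d:%06.3f' % (h, m, s)

    # e.g. 187.70593 deg -> roughly '12:30:49.42'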
**New Image Preferences**
.. figure:: figures/newimages-prefs.png
:align: center
:alt: New Image Preferences
"New Image" preferences.
The "New Images" preferences determine how Ginga reacts when a new image
is loaded into the channel. This includes when an older image is
revisited by clicking on its thumbnail in the ``Thumbs`` plugin pane.
The "Cut New" setting controls whether an automatic cut-level
calculation should be performed on the new image, or whether the
currently set cut levels should be applied. The possible settings are:
* "on": calculate a new cut levels always;
* "override": calculate a new cut levels until the user overrides
it by manually setting a cut levels, then turn "off"; or
* "off": always use the currently set cut levels.
.. tip:: The "override" setting is provided for the convenience of
having automatic cut levels, while preventing manually set
cut levels from being overridden when a new image is ingested. When
typed in the image window, the semicolon key can be used to
toggle the mode back to override (from "off"), while colon will
set the preference to "on". The ``Info`` panel shows
the state of this setting.
The "Zoom New" setting controls whether a newly visited image should
be zoomed to fit the window. There are three possible values: on,
override, and off:
* "on": the new image is always zoomed to fit;
* "override": images are automatically fitted until the zoom level is
changed manually, then the mode automatically changes to "off", or
* "off": always use the currently set zoom levels.
.. tip:: The "override" setting is provided for the convenience of
having an automatic zoom, while preventing a manually set zoom
level from being overridden when a new image is ingested. When
typed in the image window, the apostrophe (a.k.a. "single quote")
key can be used to toggle the mode back to "override" (from
"off"), while quote (a.k.a. double quote) will set the preference
to "on". The global plugin ``Info`` panel shows the state of this
setting.
The "Center New" box, if checked, will cause newly visited images to
always have the pan position reset to the center of the image. If
unchecked, the pan position is unchanged from the previous image.
The "Follow New" setting is used to control whether Ginga will change
the display if a new image is loaded into the channel. If unchecked,
the image is loaded (as seen, for example, by its appearance in the
``Thumbs`` tab), but the display will not change to the new image. This
setting is useful in cases where new images are being loaded by some
automated means into a channel and the user wishes to study the current
image without being interrupted.
The "Raise New" setting controls whether Ginga will raise the tab of a
channel when an image is loaded into that channel. If unchecked, then
Ginga will not raise the tab when an image is loaded into that
particular channel.
The "Create Thumbnail" setting controls whether Ginga will create a
thumbnail for images loaded into that channel. In cases where many
images are being loaded into a channel frequently (e.g., a low frequency
video feed), it may be undesirable to create thumbnails for all of them.
**General Preferences**
The "Num Images" setting specifies how many images can be retained in
buffers in this channel before being ejected. A value of zero (0) means
unlimited--images will never be ejected. If an image was loaded from
some accessible storage and it is ejected, it will automatically be
reloaded if the image is revisited by navigating the channel.
The "Sort Order" setting determines whether images are sorted in the
channel alphabetically by name or by the time when they were loaded.
This principally affects the order in which images are cycled when using
the up/down "arrow" keys or buttons, and not necessarily how they are
displayed in plugins like "Contents" or "Thumbs" (which generally have
their own setting preference for ordering).
The "Use scrollbars" check box controls whether the channel viewer will
show scroll bars around the edge of the viewer frame.
**Remember Preferences**
When an image is loaded, a profile is created and attached to the image
metadata in the channel. These profiles are continuously updated with
viewer state as the image is manipulated. The "Remember" preferences
control which parts of these profiles are restored to the viewer state
when the image is navigated to in the channel:
* "Restore Scale" will restore the zoom (scale) level
* "Restore Pan" will restore the pan position
* "Restore Transform" will restore any flip or swap axes transforms
* "Restore Rotation" will restore any rotation of the image
* "Restore Cuts" will restore any cut levels for the image
* "Restore Scale" will restore any coloring adjustments made (including
color map, color distribution, contrast/stretch, etc.)
"""
import math
from ginga.gw import Widgets
from ginga.misc import ParamSet, Bunch
from ginga import cmap, imap, trcalc
from ginga import GingaPlugin
from ginga import AutoCuts, ColorDist
from ginga.util import wcs, wcsmod, rgb_cms
__all__ = ['Preferences']
class Preferences(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Preferences, self).__init__(fv, fitsimage)
self.cmap_names = cmap.get_names()
self.imap_names = imap.get_names()
self.zoomalg_names = ('step', 'rate')
# get Preferences preferences
prefs = self.fv.get_preferences()
self.settings = prefs.create_category('plugin_Preferences')
self.settings.add_defaults(orientation=None)
self.settings.load(onError='silent')
self.t_ = self.fitsimage.get_settings()
self.autocuts_cache = {}
self.gui_up = False
self.calg_names = ColorDist.get_dist_names()
self.autozoom_options = self.fitsimage.get_autozoom_options()
self.autocut_options = self.fitsimage.get_autocuts_options()
self.autocut_methods = self.fitsimage.get_autocut_methods()
self.autocenter_options = self.fitsimage.get_autocenter_options()
self.pancoord_options = ('data', 'wcs')
self.sort_options = ('loadtime', 'alpha')
for key in ['color_map', 'intensity_map',
'color_algorithm', 'color_hashsize']:
self.t_.get_setting(key).add_callback(
'set', self.rgbmap_changed_ext_cb)
self.t_.get_setting('autozoom').add_callback(
'set', self.autozoom_changed_ext_cb)
self.t_.get_setting('autocenter').add_callback(
'set', self.autocenter_changed_ext_cb)
self.t_.get_setting('autocuts').add_callback(
'set', self.autocuts_changed_ext_cb)
for key in ['switchnew', 'raisenew', 'genthumb']:
self.t_.get_setting(key).add_callback(
'set', self.set_chprefs_ext_cb)
for key in ['pan']:
self.t_.get_setting(key).add_callback(
'set', self.pan_changed_ext_cb)
for key in ['scale']:
self.t_.get_setting(key).add_callback(
'set', self.scale_changed_ext_cb)
self.t_.get_setting('zoom_algorithm').add_callback(
'set', self.set_zoomalg_ext_cb)
self.t_.get_setting('zoom_rate').add_callback(
'set', self.set_zoomrate_ext_cb)
for key in ['scale_x_base', 'scale_y_base']:
self.t_.get_setting(key).add_callback(
'set', self.scalebase_changed_ext_cb)
self.t_.get_setting('rot_deg').add_callback(
'set', self.set_rotate_ext_cb)
for name in ('flip_x', 'flip_y', 'swap_xy'):
self.t_.get_setting(name).add_callback(
'set', self.set_transform_ext_cb)
self.t_.get_setting('autocut_method').add_callback('set',
self.set_autocut_method_ext_cb)
self.t_.get_setting('autocut_params').add_callback('set',
self.set_autocut_params_ext_cb)
self.t_.get_setting('cuts').add_callback(
'set', self.cutset_cb)
self.t_.setdefault('wcs_coords', 'icrs')
self.t_.setdefault('wcs_display', 'sexagesimal')
# buffer len (number of images in memory)
self.t_.add_defaults(numImages=4)
self.t_.get_setting('numImages').add_callback('set', self.set_buflen_ext_cb)
# preload images
self.t_.add_defaults(preload_images=False)
self.icc_profiles = list(rgb_cms.get_profiles())
self.icc_profiles.insert(0, None)
self.icc_intents = rgb_cms.get_intents()
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
vbox, sw, orientation = Widgets.get_oriented_box(container,
orientation=self.settings.get('orientation', None))
self.orientation = orientation
#vbox.set_border_width(4)
vbox.set_spacing(2)
# COLOR DISTRIBUTION OPTIONS
fr = Widgets.Frame("Color Distribution")
captions = (('Algorithm:', 'label', 'Algorithm', 'combobox'),
#('Table Size:', 'label', 'Table Size', 'entryset'),
('Dist Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.calg_choice = b.algorithm
#self.w.table_size = b.table_size
b.algorithm.set_tooltip("Choose a color distribution algorithm")
#b.table_size.set_tooltip("Set size of the distribution hash table")
b.dist_defaults.set_tooltip("Restore color distribution defaults")
b.dist_defaults.add_callback('activated',
lambda w: self.set_default_distmaps())
combobox = b.algorithm
options = []
index = 0
for name in self.calg_names:
options.append(name)
combobox.append_text(name)
index += 1
try:
index = self.calg_names.index(self.t_.get('color_algorithm',
"linear"))
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_calg_cb)
## entry = b.table_size
## entry.set_text(str(self.t_.get('color_hashsize', 65535)))
## entry.add_callback('activated', self.set_tablesize_cb)
fr.set_widget(w)
vbox.add_widget(fr)
# COLOR MAPPING OPTIONS
fr = Widgets.Frame("Color Mapping")
captions = (('Colormap:', 'label', 'Colormap', 'combobox'),
('Intensity:', 'label', 'Intensity', 'combobox'),
('Color Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.cmap_choice = b.colormap
self.w.imap_choice = b.intensity
b.color_defaults.add_callback('activated',
lambda w: self.set_default_cmaps())
b.colormap.set_tooltip("Choose a color map for this image")
b.intensity.set_tooltip("Choose an intensity map for this image")
b.color_defaults.set_tooltip("Restore default color and intensity maps")
fr.set_widget(w)
vbox.add_widget(fr)
combobox = b.colormap
options = []
index = 0
for name in self.cmap_names:
options.append(name)
combobox.append_text(name)
index += 1
cmap_name = self.t_.get('color_map', "gray")
try:
index = self.cmap_names.index(cmap_name)
except Exception:
index = self.cmap_names.index('gray')
combobox.set_index(index)
combobox.add_callback('activated', self.set_cmap_cb)
combobox = b.intensity
options = []
index = 0
for name in self.imap_names:
options.append(name)
combobox.append_text(name)
index += 1
imap_name = self.t_.get('intensity_map', "ramp")
try:
index = self.imap_names.index(imap_name)
except Exception:
index = self.imap_names.index('ramp')
combobox.set_index(index)
combobox.add_callback('activated', self.set_imap_cb)
# AUTOCUTS OPTIONS
fr = Widgets.Frame("Auto Cuts")
vbox2 = Widgets.VBox()
fr.set_widget(vbox2)
captions = (('Cut Low:', 'label', 'Cut Low Value', 'llabel',
'Cut Low', 'entry'),
('Cut High:', 'label', 'Cut High Value', 'llabel',
'Cut High', 'entry'),
('spacer_1', 'spacer', 'spacer_2', 'spacer',
'Cut Levels', 'button'),
('Auto Method:', 'label', 'Auto Method', 'combobox',
'Auto Levels', 'button'),)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
loval, hival = self.t_['cuts']
b.cut_levels.set_tooltip("Set cut levels manually")
b.auto_levels.set_tooltip("Set cut levels by algorithm")
b.cut_low.set_tooltip("Set low cut level (press Enter)")
b.cut_low.set_length(9)
b.cut_low_value.set_text('%.4g' % (loval))
b.cut_high.set_tooltip("Set high cut level (press Enter)")
b.cut_high.set_length(9)
b.cut_high_value.set_text('%.4g' % (hival))
b.cut_low.add_callback('activated', self.cut_levels)
b.cut_high.add_callback('activated', self.cut_levels)
b.cut_levels.add_callback('activated', self.cut_levels)
b.auto_levels.add_callback('activated', self.auto_levels)
# Setup auto cuts method choice
combobox = b.auto_method
index = 0
method = self.t_.get('autocut_method', "histogram")
for name in self.autocut_methods:
combobox.append_text(name)
index += 1
try:
index = self.autocut_methods.index(method)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_autocut_method_cb)
b.auto_method.set_tooltip("Choose algorithm for auto levels")
vbox2.add_widget(w, stretch=0)
self.w.acvbox = Widgets.VBox()
vbox2.add_widget(self.w.acvbox, stretch=1)
vbox.add_widget(fr, stretch=0)
# TRANSFORM OPTIONS
fr = Widgets.Frame("Transform")
captions = (('Flip X', 'checkbutton', 'Flip Y', 'checkbutton',
'Swap XY', 'checkbutton'),
('Rotate:', 'label', 'Rotate', 'spinfloat'),
('Restore', 'button'),)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
for name in ('flip_x', 'flip_y', 'swap_xy'):
btn = b[name]
btn.set_state(self.t_.get(name, False))
btn.add_callback('activated', self.set_transforms_cb)
b.flip_x.set_tooltip("Flip the image around the X axis")
b.flip_y.set_tooltip("Flip the image around the Y axis")
b.swap_xy.set_tooltip("Swap the X and Y axes in the image")
b.rotate.set_tooltip("Rotate the image around the pan position")
b.restore.set_tooltip("Clear any transforms and center image")
b.restore.add_callback('activated', self.restore_cb)
b.rotate.set_limits(0.00, 359.99999999, incr_value=10.0)
b.rotate.set_value(0.00)
b.rotate.set_decimals(8)
b.rotate.add_callback('value-changed', self.rotate_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# WCS OPTIONS
fr = Widgets.Frame("WCS")
captions = (('WCS Coords:', 'label', 'WCS Coords', 'combobox'),
('WCS Display:', 'label', 'WCS Display', 'combobox'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.wcs_coords.set_tooltip("Set WCS coordinate system")
b.wcs_display.set_tooltip("Set WCS display format")
# Setup WCS coords method choice
combobox = b.wcs_coords
index = 0
for name in wcsmod.coord_types:
combobox.append_text(name)
index += 1
method = self.t_.get('wcs_coords', "")
try:
index = wcsmod.coord_types.index(method)
combobox.set_index(index)
except ValueError:
pass
combobox.add_callback('activated', self.set_wcs_params_cb)
# Setup WCS display format method choice
combobox = b.wcs_display
index = 0
for name in wcsmod.display_types:
combobox.append_text(name)
index += 1
method = self.t_.get('wcs_display', "sexagesimal")
try:
index = wcsmod.display_types.index(method)
combobox.set_index(index)
except ValueError:
pass
combobox.add_callback('activated', self.set_wcs_params_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# ZOOM OPTIONS
fr = Widgets.Frame("Zoom")
captions = (('Zoom Alg:', 'label', 'Zoom Alg', 'combobox'),
('Zoom Rate:', 'label', 'Zoom Rate', 'spinfloat'),
('Stretch XY:', 'label', 'Stretch XY', 'combobox'),
('Stretch Factor:', 'label', 'Stretch Factor', 'spinfloat'),
('Scale X:', 'label', 'Scale X', 'entryset'),
('Scale Y:', 'label', 'Scale Y', 'entryset'),
('Scale Min:', 'label', 'Scale Min', 'entryset'),
('Scale Max:', 'label', 'Scale Max', 'entryset'),
('Interpolation:', 'label', 'Interpolation', 'combobox'),
('Zoom Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
index = 0
for name in self.zoomalg_names:
b.zoom_alg.append_text(name.capitalize())
index += 1
zoomalg = self.t_.get('zoom_algorithm', "step")
try:
index = self.zoomalg_names.index(zoomalg)
b.zoom_alg.set_index(index)
except Exception:
pass
b.zoom_alg.set_tooltip("Choose Zoom algorithm")
b.zoom_alg.add_callback('activated', self.set_zoomalg_cb)
index = 0
for name in ('X', 'Y'):
b.stretch_xy.append_text(name)
index += 1
b.stretch_xy.set_index(0)
b.stretch_xy.set_tooltip("Stretch pixels in X or Y")
b.stretch_xy.add_callback('activated', self.set_stretch_cb)
b.stretch_factor.set_limits(1.0, 10.0, incr_value=0.10)
b.stretch_factor.set_value(1.0)
b.stretch_factor.set_decimals(8)
b.stretch_factor.add_callback('value-changed', self.set_stretch_cb)
b.stretch_factor.set_tooltip("Length of pixel relative to 1 on other side")
b.stretch_factor.set_enabled(zoomalg != 'step')
zoomrate = self.t_.get('zoom_rate', math.sqrt(2.0))
b.zoom_rate.set_limits(1.01, 10.0, incr_value=0.1)
b.zoom_rate.set_value(zoomrate)
b.zoom_rate.set_decimals(8)
b.zoom_rate.set_enabled(zoomalg != 'step')
b.zoom_rate.set_tooltip("Step rate of increase/decrease per zoom level")
b.zoom_rate.add_callback('value-changed', self.set_zoomrate_cb)
b.zoom_defaults.add_callback('activated', self.set_zoom_defaults_cb)
scale_x, scale_y = self.fitsimage.get_scale_xy()
b.scale_x.set_tooltip("Set the scale in X axis")
b.scale_x.set_text(str(scale_x))
b.scale_x.add_callback('activated', self.set_scale_cb)
b.scale_y.set_tooltip("Set the scale in Y axis")
b.scale_y.set_text(str(scale_y))
b.scale_y.add_callback('activated', self.set_scale_cb)
scale_min, scale_max = self.t_['scale_min'], self.t_['scale_max']
b.scale_min.set_text(str(scale_min))
b.scale_min.add_callback('activated', self.set_scale_limit_cb)
b.scale_min.set_tooltip("Set the minimum allowed scale in any axis")
b.scale_max.set_text(str(scale_max))
b.scale_max.add_callback('activated', self.set_scale_limit_cb)
b.scale_min.set_tooltip("Set the maximum allowed scale in any axis")
index = 0
for name in trcalc.interpolation_methods:
b.interpolation.append_text(name)
index += 1
interp = self.t_.get('interpolation', "basic")
try:
index = trcalc.interpolation_methods.index(interp)
except ValueError:
# previous choice might not be available if preferences
# were saved when opencv was being used--if so, default
# to "basic"
index = trcalc.interpolation_methods.index('basic')
b.interpolation.set_index(index)
b.interpolation.set_tooltip("Choose interpolation method")
b.interpolation.add_callback('activated', self.set_interp_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# PAN OPTIONS
fr = Widgets.Frame("Panning")
captions = (('Pan X:', 'label', 'Pan X', 'entry',
'WCS sexagesimal', 'checkbutton'),
('Pan Y:', 'label', 'Pan Y', 'entry',
'Apply Pan', 'button'),
('Pan Coord:', 'label', 'Pan Coord', 'combobox'),
('Center Image', 'button', 'Mark Center', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
pan_x, pan_y = self.fitsimage.get_pan()
coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
pan_coord = self.t_.get('pan_coord', "data")
if pan_coord == 'data':
pan_x, pan_y = pan_x + coord_offset, pan_y + coord_offset
b.pan_x.set_tooltip("Coordinate for the pan position in X axis")
b.pan_x.set_text(str(pan_x))
#b.pan_x.add_callback('activated', self.set_pan_cb)
b.pan_y.set_tooltip("Coordinate for the pan position in Y axis")
b.pan_y.set_text(str(pan_y))
#b.pan_y.add_callback('activated', self.set_pan_cb)
b.apply_pan.add_callback('activated', self.set_pan_cb)
b.apply_pan.set_tooltip("Set the pan position")
b.wcs_sexagesimal.set_tooltip("Display pan position in sexagesimal")
b.wcs_sexagesimal.add_callback('activated',
lambda w, tf: self._update_pan_coords())
index = 0
for name in self.pancoord_options:
b.pan_coord.append_text(name)
index += 1
index = self.pancoord_options.index(pan_coord)
b.pan_coord.set_index(index)
b.pan_coord.set_tooltip("Pan coordinates type")
b.pan_coord.add_callback('activated', self.set_pan_coord_cb)
b.center_image.set_tooltip("Set the pan position to center of the image")
b.center_image.add_callback('activated', self.center_image_cb)
b.mark_center.set_tooltip("Mark the center (pan locator)")
b.mark_center.add_callback('activated', self.set_misc_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("New Images")
captions = (('Cut New:', 'label', 'Cut New', 'combobox'),
('Zoom New:', 'label', 'Zoom New', 'combobox'),
('Center New:', 'label', 'Center New', 'combobox'),
('Follow New', 'checkbutton', 'Raise New', 'checkbutton'),
('Create thumbnail', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
combobox = b.cut_new
index = 0
for name in self.autocut_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autocuts', "off")
index = self.autocut_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autocuts_cb)
b.cut_new.set_tooltip("Automatically set cut levels for new images")
combobox = b.zoom_new
index = 0
for name in self.autozoom_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autozoom', "off")
index = self.autozoom_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autozoom_cb)
b.zoom_new.set_tooltip("Automatically fit new images to window")
combobox = b.center_new
index = 0
for name in self.autocenter_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autocenter', "off")
# Hack to convert old values that used to be T/F
if isinstance(option, bool):
choice = {True: 'on', False: 'off'}
option = choice[option]
index = self.autocenter_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autocenter_cb)
b.center_new.set_tooltip("Automatically center new images in window")
b.follow_new.set_tooltip("View new images as they arrive")
b.raise_new.set_tooltip("Raise and focus tab for new images")
b.create_thumbnail.set_tooltip("Create thumbnail for new images")
self.w.follow_new.set_state(True)
self.w.follow_new.add_callback('activated', self.set_chprefs_cb)
self.w.raise_new.set_state(True)
self.w.raise_new.add_callback('activated', self.set_chprefs_cb)
self.w.create_thumbnail.set_state(True)
self.w.create_thumbnail.add_callback('activated', self.set_chprefs_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
exp = Widgets.Expander("General")
captions = (('Num Images:', 'label', 'Num Images', 'entryset'),
('Sort Order:', 'label', 'Sort Order', 'combobox'),
('Use scrollbars', 'checkbutton',
'Preload Images', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.num_images.set_tooltip(
"Maximum number of in memory images in channel (0==unlimited)")
num_images = self.t_.get('numImages', 0)
self.w.num_images.set_text(str(num_images))
self.w.num_images.add_callback('activated', self.set_buffer_cb)
combobox = b.sort_order
index = 0
for name in self.sort_options:
combobox.append_text(name)
index += 1
option = self.t_.get('sort_order', 'loadtime')
index = self.sort_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_sort_cb)
b.sort_order.set_tooltip("Sort order for images in channel")
scrollbars = self.t_.get('scrollbars', 'off')
self.w.use_scrollbars.set_state(scrollbars in ['on', 'auto'])
self.w.use_scrollbars.add_callback('activated', self.set_scrollbars_cb)
b.use_scrollbars.set_tooltip("Use scrollbars around viewer")
preload_images = self.t_.get('preload_images', False)
self.w.preload_images.set_state(preload_images)
self.w.preload_images.add_callback('activated', self.set_preload_cb)
b.preload_images.set_tooltip(
"Preload adjacent images to speed up access")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
exp = Widgets.Expander("Remember")
captions = (('Restore Scale', 'checkbutton',
'Restore Pan', 'checkbutton'),
('Restore Transform', 'checkbutton',
'Restore Rotation', 'checkbutton'),
('Restore Cuts', 'checkbutton',
'Restore Color Map', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.restore_scale.set_state(self.t_.get('profile_use_scale', False))
self.w.restore_scale.add_callback('activated', self.set_profile_cb)
self.w.restore_scale.set_tooltip("Remember scale with image")
self.w.restore_pan.set_state(self.t_.get('profile_use_pan', False))
self.w.restore_pan.add_callback('activated', self.set_profile_cb)
self.w.restore_pan.set_tooltip("Remember pan position with image")
self.w.restore_transform.set_state(
self.t_.get('profile_use_transform', False))
self.w.restore_transform.add_callback('activated', self.set_profile_cb)
self.w.restore_transform.set_tooltip("Remember transform with image")
self.w.restore_rotation.set_state(
self.t_.get('profile_use_rotation', False))
self.w.restore_rotation.add_callback('activated', self.set_profile_cb)
self.w.restore_rotation.set_tooltip("Remember rotation with image")
self.w.restore_cuts.set_state(self.t_.get('profile_use_cuts', False))
self.w.restore_cuts.add_callback('activated', self.set_profile_cb)
self.w.restore_cuts.set_tooltip("Remember cut levels with image")
self.w.restore_color_map.set_state(
self.t_.get('profile_use_color_map', False))
self.w.restore_color_map.add_callback('activated', self.set_profile_cb)
self.w.restore_color_map.set_tooltip("Remember color map with image")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
exp = Widgets.Expander("ICC Profiles")
captions = (('Output ICC profile:', 'label', 'Output ICC profile',
'combobox'),
('Rendering intent:', 'label', 'Rendering intent',
'combobox'),
('Proof ICC profile:', 'label', 'Proof ICC profile',
'combobox'),
('Proof intent:', 'label', 'Proof intent', 'combobox'),
('__x', 'spacer', 'Black point compensation', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
value = self.t_.get('icc_output_profile', None)
combobox = b.output_icc_profile
index = 0
for name in self.icc_profiles:
combobox.append_text(str(name))
index += 1
try:
index = self.icc_profiles.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("ICC profile for the viewer display")
value = self.t_.get('icc_output_intent', 'perceptual')
combobox = b.rendering_intent
index = 0
for name in self.icc_intents:
combobox.append_text(name)
index += 1
try:
index = self.icc_intents.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("Rendering intent for the viewer display")
value = self.t_.get('icc_proof_profile', None)
combobox = b.proof_icc_profile
index = 0
for name in self.icc_profiles:
combobox.append_text(str(name))
index += 1
try:
index = self.icc_profiles.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("ICC profile for soft proofing")
value = self.t_.get('icc_proof_intent', None)
combobox = b.proof_intent
index = 0
for name in self.icc_intents:
combobox.append_text(name)
index += 1
try:
index = self.icc_intents.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("Rendering intent for soft proofing")
value = self.t_.get('icc_black_point_compensation', False)
b.black_point_compensation.set_state(value)
b.black_point_compensation.add_callback(
'activated', self.set_icc_profile_cb)
b.black_point_compensation.set_tooltip("Use black point compensation")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
top.add_widget(sw, stretch=1)
btns = Widgets.HBox()
btns.set_spacing(4)
btns.set_border_width(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Save Settings")
btn.add_callback('activated', lambda w: self.save_preferences())
btns.add_widget(btn)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def set_cmap_cb(self, w, index):
"""This callback is invoked when the user selects a new color
map from the preferences pane."""
name = cmap.get_names()[index]
self.t_.set(color_map=name)
def set_imap_cb(self, w, index):
"""This callback is invoked when the user selects a new intensity
map from the preferences pane."""
name = imap.get_names()[index]
self.t_.set(intensity_map=name)
def set_calg_cb(self, w, index):
"""This callback is invoked when the user selects a new color
hashing algorithm from the preferences pane."""
#index = w.get_index()
name = self.calg_names[index]
self.t_.set(color_algorithm=name)
def set_tablesize_cb(self, w):
value = int(w.get_text())
self.t_.set(color_hashsize=value)
def set_default_cmaps(self):
cmap_name = "gray"
imap_name = "ramp"
index = self.cmap_names.index(cmap_name)
self.w.cmap_choice.set_index(index)
index = self.imap_names.index(imap_name)
self.w.imap_choice.set_index(index)
self.t_.set(color_map=cmap_name, intensity_map=imap_name)
def set_default_distmaps(self):
name = 'linear'
index = self.calg_names.index(name)
self.w.calg_choice.set_index(index)
hashsize = 65535
## self.w.table_size.set_text(str(hashsize))
self.t_.set(color_algorithm=name, color_hashsize=hashsize)
def set_zoomrate_cb(self, w, rate):
self.t_.set(zoom_rate=rate)
def set_zoomrate_ext_cb(self, setting, value):
if not self.gui_up:
return
self.w.zoom_rate.set_value(value)
def set_zoomalg_cb(self, w, idx):
self.t_.set(zoom_algorithm=self.zoomalg_names[idx])
def set_zoomalg_ext_cb(self, setting, value):
if not self.gui_up:
return
if value == 'step':
self.w.zoom_alg.set_index(0)
self.w.zoom_rate.set_enabled(False)
self.w.stretch_factor.set_enabled(False)
else:
self.w.zoom_alg.set_index(1)
self.w.zoom_rate.set_enabled(True)
self.w.stretch_factor.set_enabled(True)
def set_interp_cb(self, w, idx):
self.t_.set(interpolation=trcalc.interpolation_methods[idx])
def scalebase_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
scale_x_base, scale_y_base = self.fitsimage.get_scale_base_xy()
ratio = float(scale_x_base) / float(scale_y_base)
if ratio < 1.0:
# Y is stretched
idx = 1
ratio = 1.0 / ratio
elif ratio > 1.0:
# X is stretched
idx = 0
else:
idx = self.w.stretch_xy.get_index()
# Update stretch controls to reflect actual scale
self.w.stretch_xy.set_index(idx)
self.w.stretch_factor.set_value(ratio)
def set_zoom_defaults_cb(self, w):
rate = math.sqrt(2.0)
self.w.stretch_factor.set_value(1.0)
self.t_.set(zoom_algorithm='step', zoom_rate=rate,
scale_x_base=1.0, scale_y_base=1.0)
def set_stretch_cb(self, *args):
axis = self.w.stretch_xy.get_index()
value = self.w.stretch_factor.get_value()
if axis == 0:
self.t_.set(scale_x_base=value, scale_y_base=1.0)
else:
self.t_.set(scale_x_base=1.0, scale_y_base=value)
def set_autocenter_cb(self, w, idx):
option = self.autocenter_options[idx]
self.fitsimage.set_autocenter(option)
self.t_.set(autocenter=option)
def autocenter_changed_ext_cb(self, setting, option):
if not self.gui_up:
return
index = self.autocenter_options.index(option)
self.w.center_new.set_index(index)
def set_scale_cb(self, w, val):
scale_x = float(self.w.scale_x.get_text())
scale_y = float(self.w.scale_y.get_text())
self.fitsimage.scale_to(scale_x, scale_y)
def scale_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
scale_x, scale_y = value
self.w.scale_x.set_text(str(scale_x))
self.w.scale_y.set_text(str(scale_y))
def set_scale_limit_cb(self, *args):
scale_min = self.w.scale_min.get_text().lower()
if scale_min == 'none':
scale_min = None
else:
scale_min = float(scale_min)
scale_max = self.w.scale_max.get_text().lower()
if scale_max == 'none':
scale_max = None
else:
scale_max = float(scale_max)
self.t_.set(scale_min=scale_min, scale_max=scale_max)
def set_autozoom_cb(self, w, idx):
option = self.autozoom_options[idx]
self.fitsimage.enable_autozoom(option)
self.t_.set(autozoom=option)
def autozoom_changed_ext_cb(self, setting, option):
if not self.gui_up:
return
index = self.autozoom_options.index(option)
self.w.zoom_new.set_index(index)
def cut_levels(self, w):
fitsimage = self.fitsimage
loval, hival = fitsimage.get_cut_levels()
try:
lostr = self.w.cut_low.get_text().strip()
if lostr != '':
loval = float(lostr)
histr = self.w.cut_high.get_text().strip()
if histr != '':
hival = float(histr)
self.logger.debug("locut=%f hicut=%f" % (loval, hival))
return fitsimage.cut_levels(loval, hival)
except Exception as e:
self.fv.show_error("Error cutting levels: %s" % (str(e)))
return True
def auto_levels(self, w):
self.fitsimage.auto_levels()
def cutset_cb(self, setting, value):
if not self.gui_up:
return
loval, hival = value
self.w.cut_low_value.set_text('%.4g' % (loval))
self.w.cut_high_value.set_text('%.4g' % (hival))
def config_autocut_params(self, method):
try:
index = self.autocut_methods.index(method)
self.w.auto_method.set_index(index)
except Exception:
pass
# remove old params
self.w.acvbox.remove_all()
# Create new autocuts object of the right kind
ac_class = AutoCuts.get_autocuts(method)
# Build up a set of control widgets for the autocuts
# algorithm tweakable parameters
paramlst = ac_class.get_params_metadata()
# Get the canonical version of this object stored in our cache
# and make a ParamSet from it
params = self.autocuts_cache.setdefault(method, Bunch.Bunch())
self.ac_params = ParamSet.ParamSet(self.logger, params)
# Build widgets for the parameter/attribute list
w = self.ac_params.build_params(paramlst,
orientation=self.orientation)
self.ac_params.add_callback('changed', self.autocut_params_changed_cb)
# Add this set of widgets to the pane
self.w.acvbox.add_widget(w, stretch=1)
def set_autocut_method_ext_cb(self, setting, value):
if not self.gui_up:
return
autocut_method = self.t_['autocut_method']
self.fv.gui_do(self.config_autocut_params, autocut_method)
def set_autocut_params_ext_cb(self, setting, value):
if not self.gui_up:
return
params = self.t_['autocut_params']
params_d = dict(params) # noqa
self.ac_params.update_params(params_d)
#self.fv.gui_do(self.ac_params.params_to_widgets)
def set_autocut_method_cb(self, w, idx):
method = self.autocut_methods[idx]
self.config_autocut_params(method)
args, kwdargs = self.ac_params.get_params()
params = list(kwdargs.items())
self.t_.set(autocut_method=method, autocut_params=params)
def autocut_params_changed_cb(self, paramObj, ac_obj):
"""This callback is called when the user changes the attributes of
an object via the paramSet.
"""
args, kwdargs = paramObj.get_params()
params = list(kwdargs.items())
self.t_.set(autocut_params=params)
def set_autocuts_cb(self, w, index):
option = self.autocut_options[index]
self.fitsimage.enable_autocuts(option)
self.t_.set(autocuts=option)
def autocuts_changed_ext_cb(self, setting, option):
self.logger.debug("autocuts changed to %s" % option)
index = self.autocut_options.index(option)
if self.gui_up:
self.w.cut_new.set_index(index)
def set_transforms_cb(self, *args):
flip_x = self.w.flip_x.get_state()
flip_y = self.w.flip_y.get_state()
swap_xy = self.w.swap_xy.get_state()
self.t_.set(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy)
return True
def set_transform_ext_cb(self, setting, value):
if not self.gui_up:
return
flip_x, flip_y, swap_xy = (
self.t_['flip_x'], self.t_['flip_y'], self.t_['swap_xy'])
self.w.flip_x.set_state(flip_x)
self.w.flip_y.set_state(flip_y)
self.w.swap_xy.set_state(swap_xy)
def rgbmap_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
calg_name = self.t_['color_algorithm']
try:
idx = self.calg_names.index(calg_name)
except IndexError:
idx = 0
self.w.algorithm.set_index(idx)
cmap_name = self.t_['color_map']
try:
idx = self.cmap_names.index(cmap_name)
        except ValueError:
idx = 0
self.w.colormap.set_index(idx)
imap_name = self.t_['intensity_map']
try:
idx = self.imap_names.index(imap_name)
        except ValueError:
idx = 0
self.w.intensity.set_index(idx)
def set_buflen_ext_cb(self, setting, value):
num_images = self.t_['numImages']
# update the datasrc length
chinfo = self.channel
chinfo.datasrc.set_bufsize(num_images)
self.logger.debug("num images was set to {0}".format(num_images))
if not self.gui_up:
return
self.w.num_images.set_text(str(num_images))
def set_sort_cb(self, w, index):
"""This callback is invoked when the user selects a new sort order
from the preferences pane."""
name = self.sort_options[index]
self.t_.set(sort_order=name)
def set_preload_cb(self, w, tf):
"""This callback is invoked when the user checks the preload images
box in the preferences pane."""
self.t_.set(preload_images=tf)
def set_scrollbars_cb(self, w, tf):
"""This callback is invoked when the user checks the 'Use Scrollbars'
box in the preferences pane."""
scrollbars = 'on' if tf else 'off'
self.t_.set(scrollbars=scrollbars)
def set_icc_profile_cb(self, setting, idx):
idx = self.w.output_icc_profile.get_index()
output_profile_name = self.icc_profiles[idx]
idx = self.w.rendering_intent.get_index()
intent_name = self.icc_intents[idx]
idx = self.w.proof_icc_profile.get_index()
proof_profile_name = self.icc_profiles[idx]
idx = self.w.proof_intent.get_index()
proof_intent = self.icc_intents[idx]
bpc = self.w.black_point_compensation.get_state()
self.t_.set(icc_output_profile=output_profile_name,
icc_output_intent=intent_name,
icc_proof_profile=proof_profile_name,
icc_proof_intent=proof_intent,
icc_black_point_compensation=bpc)
return True
def rotate_cb(self, w, deg):
#deg = self.w.rotate.get_value()
self.t_.set(rot_deg=deg)
return True
def set_rotate_ext_cb(self, setting, value):
if not self.gui_up:
return
self.w.rotate.set_value(value)
return True
def center_image_cb(self, *args):
self.fitsimage.center_image()
return True
def pan_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
self._update_pan_coords()
def set_pan_cb(self, *args):
idx = self.w.pan_coord.get_index()
pan_coord = self.pancoord_options[idx]
pan_xs = self.w.pan_x.get_text().strip()
pan_ys = self.w.pan_y.get_text().strip()
# TODO: use current value for other coord if only one coord supplied
if (':' in pan_xs) or (':' in pan_ys):
# TODO: get maximal precision
pan_x = wcs.hmsStrToDeg(pan_xs)
pan_y = wcs.dmsStrToDeg(pan_ys)
pan_coord = 'wcs'
elif pan_coord == 'wcs':
pan_x = float(pan_xs)
pan_y = float(pan_ys)
else:
coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
pan_x = float(pan_xs) - coord_offset
pan_y = float(pan_ys) - coord_offset
self.fitsimage.set_pan(pan_x, pan_y, coord=pan_coord)
return True
def _update_pan_coords(self):
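        """Refresh the pan position text fields and coordinate selector
        from the viewer, using sexagesimal formatting for WCS coordinates
        if that option is checked.
        """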
pan_coord = self.t_.get('pan_coord', 'data')
pan_x, pan_y = self.fitsimage.get_pan(coord=pan_coord)
#self.logger.debug("updating pan coords (%s) %f %f" % (pan_coord, pan_x, pan_y))
if pan_coord == 'wcs':
use_sex = self.w.wcs_sexagesimal.get_state()
if use_sex:
pan_x = wcs.raDegToString(pan_x, format='%02d:%02d:%010.7f')
pan_y = wcs.decDegToString(pan_y, format='%s%02d:%02d:%09.7f')
else:
coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
pan_x += coord_offset
pan_y += coord_offset
self.w.pan_x.set_text(str(pan_x))
self.w.pan_y.set_text(str(pan_y))
index = self.pancoord_options.index(pan_coord)
self.w.pan_coord.set_index(index)
def set_pan_coord_cb(self, w, idx):
pan_coord = self.pancoord_options[idx]
pan_x, pan_y = self.fitsimage.get_pan(coord=pan_coord)
self.t_.set(pan=(pan_x, pan_y), pan_coord=pan_coord)
#self._update_pan_coords()
return True
def restore_cb(self, *args):
self.t_.set(flip_x=False, flip_y=False, swap_xy=False,
rot_deg=0.0)
self.fitsimage.center_image()
return True
def set_misc_cb(self, *args):
markc = (self.w.mark_center.get_state() != 0)
self.t_.set(show_pan_position=markc)
self.fitsimage.show_pan_mark(markc)
return True
def set_chprefs_cb(self, *args):
switchnew = (self.w.follow_new.get_state() != 0)
raisenew = (self.w.raise_new.get_state() != 0)
genthumb = (self.w.create_thumbnail.get_state() != 0)
self.t_.set(switchnew=switchnew, raisenew=raisenew,
genthumb=genthumb)
def set_chprefs_ext_cb(self, *args):
if self.gui_up:
self.w.follow_new.set_state(self.t_['switchnew'])
self.w.raise_new.set_state(self.t_['raisenew'])
self.w.create_thumbnail.set_state(self.t_['genthumb'])
def set_profile_cb(self, *args):
restore_scale = (self.w.restore_scale.get_state() != 0)
restore_pan = (self.w.restore_pan.get_state() != 0)
restore_cuts = (self.w.restore_cuts.get_state() != 0)
restore_transform = (self.w.restore_transform.get_state() != 0)
restore_rotation = (self.w.restore_rotation.get_state() != 0)
restore_color_map = (self.w.restore_color_map.get_state() != 0)
self.t_.set(profile_use_scale=restore_scale, profile_use_pan=restore_pan,
profile_use_cuts=restore_cuts,
profile_use_transform=restore_transform,
profile_use_rotation=restore_rotation,
profile_use_color_map=restore_color_map)
def set_buffer_cb(self, *args):
num_images = int(self.w.num_images.get_text())
self.logger.debug("setting num images {0}".format(num_images))
self.t_.set(numImages=num_images)
def set_wcs_params_cb(self, *args):
idx = self.w.wcs_coords.get_index()
try:
ctype = wcsmod.coord_types[idx]
except IndexError:
ctype = 'icrs'
idx = self.w.wcs_display.get_index()
dtype = wcsmod.display_types[idx]
self.t_.set(wcs_coords=ctype, wcs_display=dtype)
def preferences_to_controls(self):
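        """Populate the widgets in the preferences pane from the current
        channel settings (color maps, zoom, pan, transforms, cuts, WCS and
        miscellaneous options).
        """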
prefs = self.t_
# color map
rgbmap = self.fitsimage.get_rgbmap()
cm = rgbmap.get_cmap()
try:
index = self.cmap_names.index(cm.name)
except ValueError:
# may be a custom color map installed
index = 0
self.w.cmap_choice.set_index(index)
# color dist algorithm
calg = rgbmap.get_hash_algorithm()
index = self.calg_names.index(calg)
self.w.calg_choice.set_index(index)
## size = rgbmap.get_hash_size()
## self.w.table_size.set_text(str(size))
# intensity map
im = rgbmap.get_imap()
try:
index = self.imap_names.index(im.name)
except ValueError:
# may be a custom intensity map installed
index = 0
self.w.imap_choice.set_index(index)
# TODO: this is a HACK to get around Qt's callbacks
# on setting widget values--need a way to disable callbacks
# for direct setting
auto_zoom = prefs.get('autozoom', 'off')
# zoom settings
zoomalg = prefs.get('zoom_algorithm', "step")
index = self.zoomalg_names.index(zoomalg)
self.w.zoom_alg.set_index(index)
zoomrate = self.t_.get('zoom_rate', math.sqrt(2.0))
self.w.zoom_rate.set_value(zoomrate)
self.w.zoom_rate.set_enabled(zoomalg != 'step')
self.w.stretch_factor.set_enabled(zoomalg != 'step')
self.scalebase_changed_ext_cb(prefs, None)
scale_x, scale_y = self.fitsimage.get_scale_xy()
self.w.scale_x.set_text(str(scale_x))
self.w.scale_y.set_text(str(scale_y))
scale_min = prefs.get('scale_min', None)
self.w.scale_min.set_text(str(scale_min))
scale_max = prefs.get('scale_max', None)
self.w.scale_max.set_text(str(scale_max))
# panning settings
self._update_pan_coords()
self.w.mark_center.set_state(prefs.get('show_pan_position', False))
# transform settings
self.w.flip_x.set_state(prefs.get('flip_x', False))
self.w.flip_y.set_state(prefs.get('flip_y', False))
self.w.swap_xy.set_state(prefs.get('swap_xy', False))
self.w.rotate.set_value(prefs.get('rot_deg', 0.00))
# auto cuts settings
autocuts = prefs.get('autocuts', 'off')
index = self.autocut_options.index(autocuts)
self.w.cut_new.set_index(index)
autocut_method = prefs.get('autocut_method', None)
if autocut_method is None:
autocut_method = 'histogram'
else:
## params = prefs.get('autocut_params', {})
## p = self.autocuts_cache.setdefault(autocut_method, {})
## p.update(params)
pass
self.config_autocut_params(autocut_method)
# auto zoom settings
auto_zoom = prefs.get('autozoom', 'off')
index = self.autozoom_options.index(auto_zoom)
self.w.zoom_new.set_index(index)
# wcs settings
method = prefs.get('wcs_coords', "icrs")
try:
index = wcsmod.coord_types.index(method)
self.w.wcs_coords.set_index(index)
except ValueError:
pass
method = prefs.get('wcs_display', "sexagesimal")
try:
index = wcsmod.display_types.index(method)
self.w.wcs_display.set_index(index)
except ValueError:
pass
# misc settings
prefs.setdefault('switchnew', True)
self.w.follow_new.set_state(prefs['switchnew'])
prefs.setdefault('raisenew', True)
self.w.raise_new.set_state(prefs['raisenew'])
prefs.setdefault('genthumb', True)
self.w.create_thumbnail.set_state(prefs['genthumb'])
num_images = prefs.get('numImages', 0)
self.w.num_images.set_text(str(num_images))
prefs.setdefault('preload_images', False)
self.w.preload_images.set_state(prefs['preload_images'])
# profile settings
prefs.setdefault('profile_use_scale', False)
self.w.restore_scale.set_state(prefs['profile_use_scale'])
prefs.setdefault('profile_use_pan', False)
self.w.restore_pan.set_state(prefs['profile_use_pan'])
prefs.setdefault('profile_use_cuts', False)
self.w.restore_cuts.set_state(prefs['profile_use_cuts'])
prefs.setdefault('profile_use_transform', False)
self.w.restore_transform.set_state(prefs['profile_use_transform'])
prefs.setdefault('profile_use_rotation', False)
self.w.restore_rotation.set_state(prefs['profile_use_rotation'])
prefs.setdefault('profile_use_color_map', False)
self.w.restore_color_map.set_state(prefs['profile_use_color_map'])
def save_preferences(self):
self.t_.save()
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
return True
def start(self):
self.preferences_to_controls()
def pause(self):
pass
def resume(self):
pass
def stop(self):
self.gui_up = False
def redo(self):
pass
def __str__(self):
return 'preferences'
# END
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/event_handling/viewlims.py | 6 | 2880 | # Creates two identical panels. Zooming in on the right panel will show
# a rectangle in the first panel, denoting the zoomed region.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# We just subclass Rectangle so that it can be called with an Axes
# instance, causing the rectangle to update its shape to match the
# bounds of the Axes
class UpdatingRect(Rectangle):
def __call__(self, ax):
self.set_bounds(*ax.viewLim.bounds)
ax.figure.canvas.draw_idle()
# A class that will regenerate a fractal set as we zoom in, so that you
# can actually see the increasing detail. A box in the left panel will show
# the area to which we are zoomed.
class MandlebrotDisplay(object):
def __init__(self, h=500, w=500, niter=50, radius=2., power=2):
self.height = h
self.width = w
self.niter = niter
self.radius = radius
self.power = power
def __call__(self, xstart, xend, ystart, yend):
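        """Return a 2D array of escape-time counts for the rectangular
        window [xstart, xend] x [ystart, yend], sampled on a width x height
        grid.
        """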
self.x = np.linspace(xstart, xend, self.width)
self.y = np.linspace(ystart, yend, self.height).reshape(-1,1)
c = self.x + 1.0j * self.y
threshold_time = np.zeros((self.height, self.width))
        z = np.zeros(threshold_time.shape, dtype=complex)
        mask = np.ones(threshold_time.shape, dtype=bool)
for i in range(self.niter):
z[mask] = z[mask]**self.power + c[mask]
mask = (np.abs(z) < self.radius)
threshold_time += mask
return threshold_time
def ax_update(self, ax):
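        """Recompute the fractal at the resolution and view limits of the
        given Axes and push the result into its image.
        """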
ax.set_autoscale_on(False) # Otherwise, infinite loop
#Get the number of points from the number of pixels in the window
dims = ax.axesPatch.get_window_extent().bounds
self.width = int(dims[2] + 0.5)
        self.height = int(dims[3] + 0.5)
#Get the range for the new area
xstart,ystart,xdelta,ydelta = ax.viewLim.bounds
xend = xstart + xdelta
yend = ystart + ydelta
# Update the image object with our new data and extent
im = ax.images[-1]
im.set_data(self.__call__(xstart, xend, ystart, yend))
im.set_extent((xstart, xend, ystart, yend))
ax.figure.canvas.draw_idle()
md = MandlebrotDisplay()
Z = md(-2., 0.5, -1.25, 1.25)
fig1, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
ax2.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
rect = UpdatingRect([0, 0], 0, 0, facecolor='None', edgecolor='black')
rect.set_bounds(*ax2.viewLim.bounds)
ax1.add_patch(rect)
# Connect for changing the view limits
ax2.callbacks.connect('xlim_changed', rect)
ax2.callbacks.connect('ylim_changed', rect)
ax2.callbacks.connect('xlim_changed', md.ax_update)
ax2.callbacks.connect('ylim_changed', md.ax_update)
plt.show()
| mit |
abhiver222/perkt | face_recognition.py | 2 | 5724 | import cv2
import sys
#import matplotlib.pyplot as pt
import numpy as np
import numpy.linalg as la
import math as mt
# Content of our eigens
# There are several reference images of each person; the columns are the
# Frobenius norms of each type, seven per person:
# 1) Smiling
# 2) Sad
# 3) Serious
# 4) Blank
# 5) If wearing specs then without specs
# 6) Looking left
# 7) Looking right
#ournorms = {'Abhishek':[5916.56,6155.725,5835.83,6033.245,5922.402,6207.052,6028.91],
#            'Akshay':[6268.704,6335.443,6119.169,6277.252,6126.155,6232.754,6294.937],
#            'Chris':[6479.241,6297.295,6477.624,6463.082,6385.727,6275.596,6200.595],
#            'Tim':[6507.45,6569.225,6637.975,6731.95,6546.934,6239.888,6529.477]}
ournorms = {'Abhishek':[5866.278,6229.924,6123.536,5988.862,5966.183,5990.367,5661.118],
            'Akshay':[6748.139,5658.617,6238.200,6671.678,6228.899,6167.573,5830.901],
            'Chris':[6312.924,6374.821,6465.274,6275.596,6596.240,6382.099,6456.81],  # left right serious
            'Tim':[6226.022,6010.737,6107.618,6107.386,5994.380,5916.834,7052.43]}
indbuffervals = {'Abhishek':100,
                 'Akshay':100,
                 'Chris':50,
                 'Tim':150}
# hardcode values into ournorms above
imagePath = sys.argv[1]
def recognizeFace(image, faces):
    retval = True
    if len(faces) > 10:
        print("Fuck it too many faces shoot everyone")
        return True, 100
    for i in range(faces.shape[0]):
        x, y, w, h = faces[i]
        bufw = (400 - w)/2
        bufh = (400 - h)/2
        inmod = image[y-bufw:y+w+bufw, x-bufh:x+h+bufh]
        retwhat = checker(inmod)
        retval = retwhat and retval
    return retval, len(faces)
def checker(inmod):
tempnorm = la.norm(inmod)
retval = False
for name,val in ournorms.iteritems():
for j in val:
if(np.abs(j-tempnorm)<indbuffervals[name]):
retval = True;
print("is")
print(name)
break
if(retval):
break
if(not retval):
print("not")
print(name)
return retval
# Get values from command line
def check(image):
    #imagePath = sys.argv[1]
    #cascPath = sys.argv[2]
    imagePath = image
    cascPath = "haarcascade_frontalface_default.xml"
    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier(cascPath)
    # Read the image
    image = cv2.imread(imagePath)
    imnonmod = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.25,
        minNeighbors=5,
        minSize=(40, 40)
    )
    print("Found {0} faces!".format(len(faces)))
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
    what = True
    if len(faces) > 0:
        what, number = recognizeFace(image, faces)
    # return what to the arduino
    if what is False:
        print("intruder detected")
    cv2.imshow("Faces found", image)
    #cv2.waitKey(0)
    return what
check(imagePath)
| mit |
Unidata/MetPy | v0.11/startingguide-1.py | 4 | 1432 | import matplotlib.pyplot as plt
import numpy as np
import metpy.calc as mpcalc
from metpy.plots import SkewT
from metpy.units import units
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Create arrays of pressure, temperature, dewpoint, and wind components
p = [902, 897, 893, 889, 883, 874, 866, 857, 849, 841, 833, 824, 812, 796, 776, 751,
727, 704, 680, 656, 629, 597, 565, 533, 501, 468, 435, 401, 366, 331, 295, 258,
220, 182, 144, 106] * units.hPa
t = [-3, -3.7, -4.1, -4.5, -5.1, -5.8, -6.5, -7.2, -7.9, -8.6, -8.9, -7.6, -6, -5.1,
-5.2, -5.6, -5.4, -4.9, -5.2, -6.3, -8.4, -11.5, -14.9, -18.4, -21.9, -25.4,
-28, -32, -37, -43, -49, -54, -56, -57, -58, -60] * units.degC
td = [-22, -22.1, -22.2, -22.3, -22.4, -22.5, -22.6, -22.7, -22.8, -22.9, -22.4,
-21.6, -21.6, -21.9, -23.6, -27.1, -31, -38, -44, -46, -43, -37, -34, -36,
-42, -46, -49, -48, -47, -49, -55, -63, -72, -88, -93, -92] * units.degC
# Calculate parcel profile
prof = mpcalc.parcel_profile(p, t[0], td[0]).to('degC')
u = np.linspace(-10, 10, len(p)) * units.knots
v = np.linspace(-20, 20, len(p)) * units.knots
skew.plot(p, t, 'r')
skew.plot(p, td, 'g')
skew.plot(p, prof, 'k') # Plot parcel profile
skew.plot_barbs(p[::5], u[::5], v[::5])
skew.ax.set_xlim(-50, 15)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
plt.show() | bsd-3-clause |
joegomes/deepchem | deepchem/models/tests/test_overfit.py | 1 | 35451 | """
Tests to make sure deepchem models can overfit on tiny datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
import scipy.io
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
class TestOverfit(test_util.TensorFlowTestCase):
"""
Test that models can overfit simple datasets.
"""
def setUp(self):
super(TestOverfit, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_sklearn_regression_overfit(self):
"""Test that sklearn models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_sklearn_classification_overfit(self):
"""Test that sklearn models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_skewed_classification_overfit(self):
"""Test sklearn models can overfit 0/1 datasets with few actives."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_regression_overfit(self):
"""Test that TensorFlow models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.TensorflowMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_classification_overfit(self):
"""Test that tensorflow models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_fittransform_regression_overfit(self):
"""Test that TensorFlow FitTransform models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_skewed_classification_overfit(self):
"""Test tensorflow models can overfit 0/1 datasets with few actives."""
#n_samples = 100
n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_tf_skewed_missing_classification_overfit(self):
"""TF, skewed data, few actives
Test tensorflow models overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .002
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
y_flat, w_flat = np.squeeze(y), np.squeeze(w)
y_nonzero = y_flat[w_flat != 0]
num_nonzero = np.count_nonzero(y_nonzero)
weight_nonzero = len(y_nonzero) / num_nonzero
w_flat[y_flat != 0] = weight_nonzero
w = np.reshape(w_flat, (n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[1.],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
def test_sklearn_multitask_classification_overfit(self):
"""Test SKLearn singletask-to-multitask overfits tiny data."""
n_tasks = 10
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_multitask_classification_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_robust_multitask_classification_overfit(self):
"""Test tf robust multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.RobustMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_logreg_multitask_classification_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowLogisticRegression(
n_tasks,
n_features,
learning_rate=0.5,
weight_init_stddevs=[.01],
batch_size=n_samples)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_IRV_multitask_classification_overfit(self):
"""Test IRV classifier overfits tiny data."""
n_tasks = 5
n_samples = 10
n_features = 128
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.randint(2, size=(n_samples, n_features))
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
IRV_transformer = dc.trans.IRVTransformer(5, n_tasks, dataset)
dataset_trans = IRV_transformer.transform(dataset)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowMultiTaskIRVClassifier(
n_tasks, K=5, learning_rate=0.01, batch_size=n_samples)
# Fit trained model
model.fit(dataset_trans)
model.save()
# Eval model on train
scores = model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_multitask_regression_overfit(self):
"""Test SKLearn singletask-to-multitask overfits tiny regression data."""
n_tasks = 2
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.r2_score, task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestRegressor()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_tf_multitask_regression_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.TensorflowMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_robust_multitask_regression_overfit(self):
"""Test tf robust multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.RobustMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .2
def test_graph_conv_singletask_classification_overfit(self):
"""Test graph-conv multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
g = tf.Graph()
sess = tf.Session(graph=g)
n_tasks = 1
n_samples = 10
n_features = 3
n_classes = 2
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
n_feat = 75
batch_size = 10
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(128, 64, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphClassifier(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
def test_graph_conv_singletask_regression_overfit(self):
"""Test graph-conv multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
g = tf.Graph()
sess = tf.Session(graph=g)
n_tasks = 1
n_samples = 10
n_features = 3
n_classes = 2
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean)
n_feat = 75
batch_size = 10
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(128, 64))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphRegressor(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-2,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=40)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] < .2
def test_DTNN_multitask_regression_overfit(self):
"""Test deep tensor neural net overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
# Load mini log-solubility dataset.
input_file = os.path.join(self.current_dir, "example_DTNN.mat")
dataset = scipy.io.loadmat(input_file)
X = dataset['X']
y = dataset['T']
w = np.ones_like(y)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids=None)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_tasks = y.shape[1]
max_n_atoms = list(dataset.get_data_shape())[0]
batch_size = 10
graph_model = dc.nn.SequentialDTNNGraph(max_n_atoms=max_n_atoms)
graph_model.add(dc.nn.DTNNEmbedding(n_embedding=20))
graph_model.add(dc.nn.DTNNStep(n_embedding=20))
graph_model.add(dc.nn.DTNNStep(n_embedding=20))
graph_model.add(dc.nn.DTNNGather(n_embedding=20))
n_feat = 20
model = dc.models.DTNNGraphRegressor(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
def test_DAG_singletask_regression_overfit(self):
"""Test DAG regressor multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_feat = 75
batch_size = 10
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
graph = dc.nn.SequentialDAGGraph(
n_feat, batch_size=batch_size, max_atoms=50)
graph.add(dc.nn.DAGLayer(30, n_feat, max_atoms=50))
graph.add(dc.nn.DAGGather(max_atoms=50))
model = dc.models.MultitaskGraphRegressor(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=0.005,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
def test_weave_singletask_classification_overfit(self):
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
max_atoms = 50
graph = dc.nn.SequentialWeaveGraph(
max_atoms=max_atoms, n_atom_feat=n_atom_feat, n_pair_feat=n_pair_feat)
graph.add(dc.nn.WeaveLayer(max_atoms, 75, 14))
graph.add(dc.nn.WeaveConcat(batch_size, n_output=n_feat))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(dc.nn.WeaveGather(batch_size, n_input=n_feat))
model = dc.models.MultitaskGraphClassifier(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
def test_weave_singletask_regression_overfit(self):
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
max_atoms = 50
graph = dc.nn.SequentialWeaveGraph(
max_atoms=max_atoms, n_atom_feat=n_atom_feat, n_pair_feat=n_pair_feat)
graph.add(dc.nn.WeaveLayer(max_atoms, 75, 14))
graph.add(dc.nn.WeaveConcat(batch_size, n_output=n_feat))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(dc.nn.WeaveGather(batch_size, n_input=n_feat))
model = dc.models.MultitaskGraphRegressor(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=40)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
def test_siamese_singletask_classification_overfit(self):
"""Test siamese singletask model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
n_train_trials = 80
support_batch_size = n_pos + n_neg
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .9
assert scores[0] > .75
##################################################### DEBUG
def test_attn_lstm_singletask_classification_overfit(self):
"""Test attn lstm singletask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
support_batch_size = n_pos + n_neg
n_train_trials = 80
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
# Apply an attention lstm layer
support_model.join(
dc.nn.AttnLSTMEmbedding(test_batch_size, support_batch_size, 64,
max_depth))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
# Measure performance on 0-th task.
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .85
assert scores[0] > .79
##################################################### DEBUG
def test_residual_lstm_singletask_classification_overfit(self):
"""Test resi-lstm multitask overfits tiny data."""
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
support_batch_size = n_pos + n_neg
n_train_trials = 80
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
# Apply a residual lstm layer
support_model.join(
dc.nn.ResiLSTMEmbedding(test_batch_size, support_batch_size, 64,
max_depth))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
# Measure performance on 0-th task.
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .9
assert scores[0] > .65
##################################################### DEBUG
def test_tf_progressive_regression_overfit(self):
"""Test tf progressive multitask overfits tiny data."""
np.random.seed(123)
n_tasks = 9
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
metric = dc.metrics.Metric(dc.metrics.rms_score, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
seed=123,
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [metric])
y_pred = model.predict(dataset)
assert scores[metric.name] < .2
| mit |
mattsmart/biomodels | oncogenesis_dynamics/firstpassage.py | 1 | 15435 | import matplotlib.pyplot as plt
import numpy as np
import time
from os import sep
from multiprocessing import Pool, cpu_count
from constants import OUTPUT_DIR, PARAMS_ID, PARAMS_ID_INV, COLOURS_DARK_BLUE
from data_io import read_varying_mean_sd_fpt_and_params, collect_fpt_mean_stats_and_params, read_fpt_and_params,\
write_fpt_and_params
from formulae import stoch_gillespie, stoch_tauleap_lowmem, stoch_tauleap, get_physical_fp_stable_and_not, map_init_name_to_init_cond
from params import Params
from presets import presets
from plotting import plot_table_params
def get_fpt(ensemble, init_cond, params, num_steps=1000000, establish_switch=False, brief=True):
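    """Run `ensemble` independent stochastic trajectories from init_cond and
    return an array of their first-passage (or establishment) times.
    If establish_switch is True, trajectories stop at establishment rather
    than first passage; if brief is True, the low-memory tau-leaping solver
    is used instead of the Gillespie algorithm.
    """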
# TODO could pass simmethod tau or gillespie to params and parse here
if establish_switch:
fpt_flag = False
establish_flag = True
else:
fpt_flag = True
establish_flag = False
fp_times = np.zeros(ensemble)
for i in xrange(ensemble):
if brief:
species_end, times_end = stoch_tauleap_lowmem(init_cond, num_steps, params, fpt_flag=fpt_flag,
establish_flag=establish_flag)
else:
species, times = stoch_gillespie(init_cond, num_steps, params, fpt_flag=fpt_flag,
establish_flag=establish_flag)
times_end = times[-1]
# plotting
#plt.plot(times, species)
#plt.show()
fp_times[i] = times_end
if establish_switch:
print "establish time is", fp_times[i]
return fp_times
def get_mean_fpt(init_cond, params, samplesize=32, establish_switch=False):
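    """Return the mean first-passage time over an ensemble of `samplesize` runs."""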
fpt = get_fpt(samplesize, init_cond, params, establish_switch=establish_switch)
return np.mean(fpt)
def wrapper_get_fpt(fn_args_dict):
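    """Unpack an args/kwargs dict and call get_fpt; used as the target of
    multiprocessing.Pool.map. The np.random.seed() call reseeds each worker
    so parallel runs do not share an RNG stream.
    """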
np.random.seed() # TODO double check that this fixes cluster RNG issues
if fn_args_dict['kwargs'] is not None:
return get_fpt(*fn_args_dict['args'], **fn_args_dict['kwargs'])
else:
return get_fpt(*fn_args_dict['args'])
def fast_fp_times(ensemble, init_cond, params, num_processes, num_steps='default', establish_switch=False):
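    """Parallel wrapper around get_fpt: split `ensemble` runs evenly across
    `num_processes` workers and return the concatenated first-passage times.
    `ensemble` must be divisible by `num_processes`.
    """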
if num_steps == 'default':
kwargs_dict = {'num_steps': 1000000, 'establish_switch': establish_switch}
else:
kwargs_dict = {'num_steps': num_steps, 'establish_switch': establish_switch}
fn_args_dict = [0]*num_processes
print "NUM_PROCESSES:", num_processes
assert ensemble % num_processes == 0
for i in xrange(num_processes):
subensemble = ensemble / num_processes
print "process:", i, "job size:", subensemble, "runs"
fn_args_dict[i] = {'args': (subensemble, init_cond, params),
'kwargs': kwargs_dict}
t0 = time.time()
pool = Pool(num_processes)
results = pool.map(wrapper_get_fpt, fn_args_dict)
pool.close()
pool.join()
print "TIMER:", time.time() - t0
fp_times = np.zeros(ensemble)
for i, result in enumerate(results):
fp_times[i*subensemble:(i+1)*subensemble] = result
return fp_times
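# Illustrative usage sketch (parameter choices here are assumptions; the
# __main__ block below is the actual driver):
#   params = presets('preset_xyz_constant')
#   init_cond = map_init_name_to_init_cond(params, "x_all")
#   fp_times = fast_fp_times(96, init_cond, params, num_processes=8)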
def fast_mean_fpt_varying(param_vary_name, param_vary_values, params, num_processes, init_name="x_all", samplesize=30, establish_switch=False):
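    """For each value of `param_vary_name`, run `samplesize` simulations in
    parallel and return lists of the mean and standard deviation of the
    first-passage times.
    """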
assert samplesize % num_processes == 0
mean_fpt_varying = [0]*len(param_vary_values)
sd_fpt_varying = [0]*len(param_vary_values)
for idx, pv in enumerate(param_vary_values):
params_step = params.mod_copy( {param_vary_name: pv} )
init_cond = map_init_name_to_init_cond(params, init_name)
fp_times = fast_fp_times(samplesize, init_cond, params_step, num_processes, establish_switch=establish_switch)
mean_fpt_varying[idx] = np.mean(fp_times)
sd_fpt_varying[idx] = np.std(fp_times)
return mean_fpt_varying, sd_fpt_varying
def fpt_histogram(fpt_list, params, figname_mod="", flag_show=False, flag_norm=True, flag_xlog10=False, flag_ylog10=False, fs=12):
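    """Plot and save a histogram of one ensemble of first-passage times,
    with optional normalization and log-scaled axes.
    """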
ensemble_size = len(fpt_list)
    bins = np.linspace(np.min(fpt_list), np.max(fpt_list), 50)
#bins = np.arange(0, 3*1e4, 50) # to plot against FSP
# normalize
if flag_norm:
y_label = 'Probability'
weights = np.ones_like(fpt_list) / ensemble_size
else:
y_label = 'Frequency'
weights = np.ones_like(fpt_list)
# prep fig before axes mod
fig = plt.figure(figsize=(8,6), dpi=120)
ax = plt.gca()
# mod axes (log)
if flag_xlog10:
ax.set_xscale("log", nonposx='clip')
max_log = np.ceil(np.max(np.log10(fpt_list))) # TODO check this matches multihist
bins = np.logspace(0.1, max_log, 100)
if flag_ylog10:
ax.set_yscale("log", nonposx='clip')
# plot
plt.hist(fpt_list, bins=bins, alpha=0.6, weights=weights)
plt.hist(fpt_list, histtype='step', bins=bins, alpha=0.6, label=None, weights=weights, edgecolor='k', linewidth=0.5,
fill=False)
# draw mean line
#plt.axvline(np.mean(fpt_list), color='k', linestyle='dashed', linewidth=2)
# labels
plt.title('First-passage time histogram (%d runs) - %s' % (ensemble_size, params.system), fontsize=fs)
ax.set_xlabel('First-passage time (cell division timescale)', fontsize=fs)
ax.set_ylabel(y_label, fontsize=fs)
ax.tick_params(labelsize=fs)
# plt.locator_params(axis='x', nbins=4)
#plt.legend(loc='upper right', fontsize=fs)
# create table of params
plot_table_params(ax, params)
# save and show
plt_save = "fpt_histogram" + figname_mod
plt.savefig(OUTPUT_DIR + sep + plt_save + '.pdf', bbox_inches='tight')
if flag_show:
plt.show()
return ax
def fpt_histogram_multi(multi_fpt_list, labels, figname_mod="", fs=12, bin_linspace=80, colours=COLOURS_DARK_BLUE,
figsize=(8,6), ec='k', lw=0.5, flag_norm=False, flag_show=False, flag_xlog10=False,
flag_ylog10=False, flag_disjoint=False):
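    """Plot overlaid (or disjoint) histograms for several first-passage time
    ensembles; all lists are first truncated to the smallest ensemble size.
    """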
# resize fpt lists if not all same size (to the min size)
fpt_lengths = [len(fpt) for fpt in multi_fpt_list]
ensemble_size = np.min(fpt_lengths)
# cleanup data to same size
if sum(fpt_lengths - ensemble_size) > 0:
print "Resizing multi_fpt_list elements:", fpt_lengths, "to the min size of:", ensemble_size
for idx in xrange(len(fpt_lengths)):
multi_fpt_list[idx] = multi_fpt_list[idx][:ensemble_size]
bins = np.linspace(np.min(multi_fpt_list), np.max(multi_fpt_list), bin_linspace)
# normalize
if flag_norm:
y_label = 'Probability'
weights = np.ones_like(multi_fpt_list) / ensemble_size
else:
y_label = 'Frequency'
weights = np.ones_like(multi_fpt_list)
# prep fig before axes mod
fig = plt.figure(figsize=figsize, dpi=120)
ax = plt.gca()
# mod axes (log)
if flag_xlog10:
ax.set_xscale("log", nonposx='clip')
max_log = np.ceil(np.max(np.log10(multi_fpt_list)))
bins = np.logspace(0.1, max_log, 100)
if flag_ylog10:
ax.set_yscale("log", nonposx='clip')
# plot calls
if flag_disjoint:
plt.hist(multi_fpt_list, bins=bins, color=colours, label=labels, weights=weights, edgecolor=ec, linewidth=lw)
else:
for idx, fpt_list in enumerate(multi_fpt_list):
plt.hist(fpt_list, bins=bins, alpha=0.6, color=colours[idx], label=labels[idx],
weights=weights[idx,:])
plt.hist(fpt_list, histtype='step', bins=bins, alpha=0.6, color=colours[idx],
label=None,weights=weights[idx,:], edgecolor=ec, linewidth=lw, fill=False)
# labels
plt.title('First-passage time histogram (%d runs)' % (ensemble_size), fontsize=fs)
ax.set_xlabel('First-passage time (cell division timescale)', fontsize=fs)
ax.set_ylabel(y_label, fontsize=fs)
plt.legend(loc='upper right', fontsize=fs)
ax.tick_params(labelsize=fs)
# plt.locator_params(axis='x', nbins=4)
# save and show
plt_save = "fpt_multihistogram" + figname_mod
fig.savefig(OUTPUT_DIR + sep + plt_save + '.pdf', bbox_inches='tight')
if flag_show:
plt.show()
def plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_vary_name, param_set, params, samplesize, SEM_flag=True, show_flag=False, figname_mod=""):
if SEM_flag:
sd_fpt_varying = sd_fpt_varying / np.sqrt(samplesize) # s.d. from CLT since sample mean is approx N(mu, sd**2/n)
plt.errorbar(param_set, mean_fpt_varying, yerr=sd_fpt_varying, label="sim")
plt.title("Mean FP Time, %s varying (sample=%d)" % (param_vary_name, samplesize))
ax = plt.gca()
ax.set_xlabel(param_vary_name)
ax.set_ylabel('Mean FP time')
# log options
for i in xrange(len(mean_fpt_varying)):
print i, param_set[i], mean_fpt_varying[i], sd_fpt_varying[i]
flag_xlog10 = True
flag_ylog10 = True
if flag_xlog10:
ax.set_xscale("log", nonposx='clip')
#ax.set_xlim([0.8*1e2, 1*1e7])
if flag_ylog10:
        ax.set_yscale("log", nonposy='clip')
#ax.set_ylim([0.8*1e2, 3*1e5])
# create table of params
plot_table_params(ax, params)
plt_save = "mean_fpt_varying" + figname_mod
plt.savefig(OUTPUT_DIR + sep + plt_save + '.png', bbox_inches='tight')
if show_flag:
plt.show()
return ax
if __name__ == "__main__":
# SCRIPT FLAGS
run_compute_fpt = False
run_read_fpt = False
run_generate_hist_multi = False
run_load_hist_multi = False
run_collect = False
run_means_read_and_plot = False
run_means_collect_and_plot = True
# SCRIPT PARAMETERS
establish_switch = True
brief = True
num_steps = 1000000 # default 1000000
ensemble = 1 # default 100
# DYNAMICS PARAMETERS
params = presets('preset_xyz_constant') # preset_xyz_constant, preset_xyz_constant_fast, valley_2hit
# OTHER PARAMETERS
init_cond = np.zeros(params.numstates, dtype=int)
init_cond[0] = int(params.N)
# PLOTTING
FS = 16
EC = 'k'
LW = 0.5
FIGSIZE=(8,6)
if run_compute_fpt:
fp_times = get_fpt(ensemble, init_cond, params, num_steps=num_steps, establish_switch=establish_switch, brief=brief)
write_fpt_and_params(fp_times, params)
fpt_histogram(fp_times, params, flag_show=True, figname_mod="XZ_model_withFeedback_mu1e-1")
if run_read_fpt:
dbdir = OUTPUT_DIR
dbdir_100 = dbdir + sep + "fpt_mean" + sep + "100_c95"
fp_times_xyz_100, params_a = read_fpt_and_params(dbdir_100)
dbdir_10k = dbdir + sep + "fpt_mean" + sep + "10k_c95"
fp_times_xyz_10k, params_b = read_fpt_and_params(dbdir_10k)
if run_generate_hist_multi:
ensemble = 21
num_proc = cpu_count() - 1
param_vary_id = "N"
param_idx = PARAMS_ID_INV[param_vary_id]
param_vary_values = [1e2, 1e3, 1e4]
param_vary_labels = ['A', 'B', 'C']
params_ensemble = [params.params_list[:] for _ in param_vary_values]
multi_fpt = np.zeros((len(param_vary_values), ensemble))
multi_fpt_labels = ['label' for _ in param_vary_values]
for idx, param_val in enumerate(param_vary_values):
param_val_string = "%s=%.3f" % (param_vary_id, param_val)
params_step = params.mod_copy({param_vary_id: param_val})
#fp_times = get_fpt(ensemble, init_cond, params_set[idx], num_steps=num_steps)
fp_times = fast_fp_times(ensemble, init_cond, params_step, num_proc, establish_switch=establish_switch)
write_fpt_and_params(fp_times, params_step, filename="fpt_multi", filename_mod=param_val_string)
multi_fpt[idx,:] = np.array(fp_times)
multi_fpt_labels[idx] = "%s (%s)" % (param_vary_labels[idx], param_val_string)
fpt_histogram_multi(multi_fpt, multi_fpt_labels, flag_show=True, flag_ylog10=False)
if run_load_hist_multi:
flag_norm = True
dbdir = OUTPUT_DIR + sep + "may25_100"
#dbdir_c80 = dbdir + "fpt_feedback_z_ens1040_c0.80_params"
c80_header = "fpt_feedback_z_ens1040_c80_N100"
c88_header = "fpt_feedback_z_ens1040_c88_N100"
c95_header = "fpt_feedback_z_ens1040_c95_N100"
fp_times_xyz_c80, params_a = read_fpt_and_params(dbdir, "%s_data.txt" % c80_header, "%s_params.csv" % c80_header)
fp_times_xyz_c88, params_b = read_fpt_and_params(dbdir, "%s_data.txt" % c88_header, "%s_params.csv" % c88_header)
fp_times_xyz_c95, params_c = read_fpt_and_params(dbdir, "%s_data.txt" % c95_header, "%s_params.csv" % c95_header)
fpt_histogram(fp_times_xyz_c88, params_b, flag_ylog10=False, figname_mod="_xyz_feedbackz_N10k_c88_may25")
plt.close('all')
fpt_histogram(fp_times_xyz_c88, params_b, flag_ylog10=True, figname_mod="_xyz_feedbackz_N10k_c88_may25_logy")
plt.close('all')
multi_fpt = [fp_times_xyz_c80, fp_times_xyz_c88, fp_times_xyz_c95]
labels = ("c=0.80 (Region I)", "c=0.88 (Region IV)", "c=0.95 (Region III)")
fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=False, flag_norm=flag_norm, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE)
fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=True, flag_norm=flag_norm, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE)
fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=True, flag_norm=False, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE, flag_disjoint=True)
if run_means_read_and_plot:
datafile = OUTPUT_DIR + sep + "fpt_stats_collected_mean_sd_varying_N.txt"
paramfile = OUTPUT_DIR + sep + "fpt_stats_collected_mean_sd_varying_N_params.csv"
samplesize=48
mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params = \
read_varying_mean_sd_fpt_and_params(datafile, paramfile)
plt_axis = plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params, samplesize,
SEM_flag=True, show_flag=True, figname_mod="_%s_n%d" % (param_to_vary, samplesize))
"""
mu = params.mu
mixed_fp_zinf_at_N = [0.0]*len(param_set)
for idx, N in enumerate(param_set):
params_at_N = params.mod_copy( {'N': N} )
fps = get_physical_and_stable_fp(params_at_N)
assert len(fps) == 1
mixed_fp_zinf_at_N[idx] = fps[0][2]
plt_axis.plot(param_set, [1/(mu*n) for n in param_set], '-o', label="(mu*N)^-1")
plt_axis.plot(param_set, [1/(mu*zinf) for zinf in mixed_fp_zinf_at_N], '-o', label="(mu*z_inf)^-1")
plt_axis.set_yscale("log", nonposx='clip')
plt_axis.set_xscale("log", nonposx='clip')
plt_axis.legend()
plt.savefig(OUTPUT_DIR + sep + "theorycompare_loglog" + '.png', bbox_inches='tight')
plt.show()
"""
if run_means_collect_and_plot:
dbdir = OUTPUT_DIR + sep + "tocollect" + sep + "runset_june17_FPT_cvary_44_ens240"
datafile, paramfile = collect_fpt_mean_stats_and_params(dbdir)
samplesize=240
mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params = \
read_varying_mean_sd_fpt_and_params(datafile, paramfile)
plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params, samplesize,
SEM_flag=True, show_flag=True, figname_mod="_%s_n%d" % (param_to_vary, samplesize))
| mit |
soylentdeen/Graffity | src/Vibrations/VibrationExplorer.py | 1 | 5531 | import sys
sys.path.append('../')
import numpy
import Graffity
import CIAO_DatabaseTools
import astropy.time as aptime
from matplotlib import pyplot
import colorsys
def getFreqs():
while True:
retval = []
enteredText = raw_input("Enter a comma separated list of frequencies: ")
try:
for val in enteredText.split(','):
retval.append(float(val.strip()))
break
except:
pass
return retval
def getModes():
while True:
enteredText = raw_input("Which modes to investigate? AVC or ALL? : ")
if enteredText == 'AVC':
return 'AVC'
if enteredText == 'ALL':
return 'ALL'
def getDataLoggers(DB, GravityVals, startTime, ax=None):
order = numpy.argsort(GravityVals[:,-2])
GravityVals = GravityVals[order]
i = 1
for record in GravityVals:
print("%03d | %s" % (i,aptime.Time(float(record[-2]), format='mjd').iso))
i += 1
index = int(raw_input("Enter desired index :")) - 1
FTData = Graffity.GRAVITY_Data(GravityVals[index][-1])
FTData.DualSciP2VM.computeOPDPeriodograms()
VibrationPeaks = FTData.DualSciP2VM.findVibrationPeaks()
FTData.computeACQCAMStrehl()
FTData.computeACQCAMStrehl()
#freqs = getFreqs()
#Modes = getModes()
CIAOVals = DB.query(keywords=['ALT', 'AZ', 'STREHL'], timeOfDay='NIGHT', startTime=startTime)
DataLoggers = {}
for UT in [1, 2, 3, 4]:
closest = numpy.argsort(numpy.abs(CIAOVals[UT][:,-4]
- float(GravityVals[index,-2])))[0]
DataLoggers[UT] = Graffity.DataLogger(directory=CIAOVals[UT][closest,-3])
DataLoggers[UT].loadData()
DataLoggers[UT].computeStrehl()
freqs = extractBCIFreqs(VibrationPeaks, UT)
DataLoggers[UT].measureVibs(frequencies=freqs, modes='AVC')
return DataLoggers, VibrationPeaks
def extractBCIFreqs(VibrationPeaks, UT):
freqs = []
baselines = {0:[4,3], 1:[4, 2], 2:[4, 1], 3:[3, 2], 4:[3, 1], 5:[2, 1]}
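    # Map each baseline index to the pair of UTs that form it; a telescope's
    # vibration frequencies are collected from every baseline containing it.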
for bl in baselines.keys():
if UT in baselines[bl]:
for f in VibrationPeaks[bl]['freqs']:
freqs.append(f)
return numpy.array(freqs)
fig = pyplot.figure(0, figsize=(8.0, 10.0), frameon=False)
fig.clear()
ax1 = fig.add_axes([0.1, 0.2, 0.4, 0.3])
ax2 = fig.add_axes([0.1, 0.5, 0.4, 0.4], sharex=ax1)
ax3 = fig.add_axes([0.5, 0.2, 0.4, 0.3], sharex=ax1)
ax3.yaxis.tick_right()
ax4 = fig.add_axes([0.5, 0.5, 0.4, 0.4], sharex=ax1)
ax4.yaxis.tick_right()
GDB = CIAO_DatabaseTools.GRAVITY_Database()
CDB = CIAO_DatabaseTools.CIAO_Database()
startTime = '2017-08-10 00:00:00'
GravityVals = GDB.query(keywords = [], timeOfDay='NIGHT', startTime=startTime)
#ax1.set_xscale('log')
#ax1.set_yscale('log')
CIAO, Vibrations = getDataLoggers(CDB, GravityVals, startTime, ax=ax1)
hsv = [(numpy.random.uniform(low=0.0, high=1),
numpy.random.uniform(low=0.2, high=1),
numpy.random.uniform(low=0.9, high=1)) for i in
range(99)]
colors = []
for h in hsv:
colors.append(colorsys.hsv_to_rgb(h[0], h[1], h[2]))
handles = numpy.array([])
labels = numpy.array([])
baselines = {0:[4,3], 1:[4, 2], 2:[4, 1], 3:[3, 2], 4:[3, 1], 5:[2, 1]}
colors = {0:'y', 1:'g', 2:'r', 3:'c', 4:'m', 5:'k'}
for CIAO_ID, ax in zip([1, 2, 3, 4], [ax1, ax2, ax3, ax4]):
DL = CIAO[CIAO_ID]
for mode in DL.vibPower.keys():
BCIVibs = {}
for bl in baselines.keys():
if CIAO_ID in baselines[bl]:
label = "UT%dUT%d" % (baselines[bl][0], baselines[bl][1])
BCIVibs[label] = {'index':bl, 'power':[]}
f = []
p = []
for peak in DL.vibPower[mode]['CommPower'].iteritems():
if peak[1] > 0:
f.append(peak[0])
p.append(numpy.log10(peak[1]))
for label in BCIVibs.keys():
if not( f[-1] in Vibrations[BCIVibs[label]['index']]['freqs']):
BCIVibs[label]['power'].append(0.0)
else:
for i, freq in enumerate(Vibrations[BCIVibs[label]['index']]['freqs']):
if freq == f[-1]:
BCIVibs[label]['power'].append(Vibrations[BCIVibs[label]['index']]['power'][i])
#ax.plot(DL.ZPowerFrequencies, numpy.log10(DL.ZPowerCommands[mode,:]), color =
# colors[mode])
f = numpy.array(f)
p = numpy.array(p)
ax.scatter(numpy.log10(f), p, color='b')
for bl in BCIVibs.keys():
BCIVibs[bl]['power'] = numpy.array(BCIVibs[bl]['power'])
nonzero = BCIVibs[bl]['power'] > 0.0
ax.scatter(numpy.log10(f[nonzero]), numpy.log10(BCIVibs[bl]['power'][nonzero]),
label=bl, color = colors[BCIVibs[bl]['index']])
#ax.scatter(numpy.array(f), numpy.array(p), color=colors[mode],
# label='Mode %d' % mode)
h, l = ax.get_legend_handles_labels()
handles=numpy.append(handles, numpy.array(h))
labels =numpy.append(labels, numpy.array(l))
#ax1.set_ybound(0, 20)
#ax2.set_ybound(0, 20)
#ax3.set_ybound(0, 20)
#ax4.set_ybound(0, 20)
#ax1.set_xbound(0, 160)
#ax2.set_xbound(0, 160)
#ax3.set_xbound(0, 160)
#ax4.set_xbound(0, 160)
#ax2.xaxis.set_ticklabels([])
#ax4.xaxis.set_ticklabels([])
junk, indices = numpy.unique(labels, return_index=True)
fig.legend(handles[indices], labels[indices], ncol=4, loc=3, scatterpoints=1)
fig.show()
#"""
| mit |
eoinmurray/icarus | Experiments/power_dep.py | 1 | 1341 |
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import numpy as np
import matplotlib.pyplot as plt
from constants import Constants
import Icarus.Experiment as Experiment
if __name__ == "__main__":
"""
Runs power dependance.
"""
constants = Constants()
hold_power = np.linspace(0.2, 0.8, num=60)
hold_x = []
hold_xx = []
for power in hold_power:
constants.power = power
experiment = Experiment(constants, Visualizer=False)
experiment.run('power_dep')
hold_x.append(experiment.spectrometer.x)
hold_xx.append(experiment.spectrometer.xx)
plt.plot(np.log10(hold_power), np.log10(hold_x), 'ro')
plt.plot(np.log10(hold_power), np.log10(hold_xx), 'bo')
idx = (np.abs(hold_power-1)).argmin()
A = np.vstack([np.log10(hold_power[0:idx]), np.ones(len(np.log10(hold_power[0:idx])))]).T
mx, cx = np.linalg.lstsq(A, np.log10(hold_x[0:idx]))[0]
mxx, cxx = np.linalg.lstsq(A, np.log10(hold_xx[0:idx]))[0]
print mx, mxx
hold_power_interpolate = np.linspace(np.min(hold_power[0:idx]), np.max(hold_power[0:idx]), num=200)
plt.plot(np.log10(hold_power_interpolate), mx*np.log10(hold_power_interpolate) + cx, 'g--')
plt.plot(np.log10(hold_power_interpolate), mxx*np.log10(hold_power_interpolate) + cxx, 'g--')
plt.legend(['X', 'XX'])
plt.show() | mit |
openai/baselines | baselines/results_plotter.py | 1 | 3455 | import numpy as np
import matplotlib
matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
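    # Build a strided view whose last axis slides a length-`window` window
    # along the original last axis; window_func() can then apply a reducing
    # function (e.g. np.mean) over axis=-1 without copying the data.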
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def plot_curves(xy_list, xaxis, yaxis, title):
fig = plt.figure(figsize=(8,2))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i % len(COLORS)]
plt.scatter(x, y, s=2)
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.tight_layout()
fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout())
plt.grid(True)
def split_by_task(taskpath):
return taskpath['dirname'].split('/')[-1].split('-')[0]
def plot_results(dirs, num_timesteps=10e6, xaxis=X_TIMESTEPS, yaxis=Y_REWARD, title='', split_fn=split_by_task):
results = plot_util.load_results(dirs)
plot_util.plot_results(results, xy_fn=lambda r: ts2xy(r['monitor'], xaxis, yaxis), split_fn=split_fn, average_group=True, resample=int(1e6))
# Example usage in jupyter-notebook
# from baselines.results_plotter import plot_results
# %matplotlib inline
# plot_results("./log")
# Here ./log is a directory containing the monitor.csv files
def main():
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs = '*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
    parser.add_argument('--xaxis', help = 'Variable on X-axis', default = X_TIMESTEPS)
    parser.add_argument('--yaxis', help = 'Variable on Y-axis', default = Y_REWARD)
parser.add_argument('--task_name', help = 'Title of plot', default = 'Breakout')
args = parser.parse_args()
args.dirs = [os.path.abspath(dir) for dir in args.dirs]
plot_results(args.dirs, args.num_timesteps, args.xaxis, args.yaxis, args.task_name)
plt.show()
if __name__ == '__main__':
main()
| mit |
Sixshaman/networkx | doc/make_gallery.py | 35 | 2453 | """
Generate a thumbnail gallery of examples.
"""
from __future__ import print_function
import os, glob, re, shutil, sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot
import matplotlib.image
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
examples_source_dir = '../examples/drawing'
examples_dir = 'examples/drawing'
template_dir = 'source/templates'
static_dir = 'source/static/examples'
pwd=os.getcwd()
rows = []
template = """
{%% extends "layout.html" %%}
{%% set title = "Gallery" %%}
{%% block body %%}
<h3>Click on any image to see source code</h3>
<br/>
%s
{%% endblock %%}
"""
link_template = """
<a href="%s"><img src="%s" border="0" alt="%s"/></a>
"""
if not os.path.exists(static_dir):
os.makedirs(static_dir)
os.chdir(examples_source_dir)
all_examples=sorted(glob.glob("*.py"))
# check for out of date examples
stale_examples=[]
for example in all_examples:
png=example.replace('py','png')
png_static=os.path.join(pwd,static_dir,png)
if (not os.path.exists(png_static) or
os.stat(png_static).st_mtime < os.stat(example).st_mtime):
stale_examples.append(example)
for example in stale_examples:
print(example, end=" ")
png=example.replace('py','png')
matplotlib.pyplot.figure(figsize=(6,6))
stdout=sys.stdout
sys.stdout=open('/dev/null','w')
try:
execfile(example)
sys.stdout=stdout
print(" OK")
except ImportError as strerr:
sys.stdout=stdout
sys.stdout.write(" FAIL: %s\n" % strerr)
continue
matplotlib.pyplot.clf()
im=matplotlib.image.imread(png)
fig = Figure(figsize=(2.5, 2.5))
canvas = FigureCanvas(fig)
    ax = fig.add_axes([0,0,1,1], aspect='auto', frameon=False, xticks=[], yticks=[])
# basename, ext = os.path.splitext(basename)
ax.imshow(im, aspect='auto', resample=True, interpolation='bilinear')
thumbfile=png.replace(".png","_thumb.png")
fig.savefig(thumbfile)
shutil.copy(thumbfile,os.path.join(pwd,static_dir,thumbfile))
shutil.copy(png,os.path.join(pwd,static_dir,png))
basename, ext = os.path.splitext(example)
link = '%s/%s.html'%(examples_dir, basename)
rows.append(link_template%(link, os.path.join('_static/examples',thumbfile), basename))
os.chdir(pwd)
fh = open(os.path.join(template_dir,'gallery.html'), 'w')
fh.write(template%'\n'.join(rows))
fh.close()
| bsd-3-clause |
hainm/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
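# The fixed linear (shearing) transformation above elongates and tilts the
# blobs, breaking the roughly spherical-cluster assumption behind k-means.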
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
kcarnold/autograd | examples/fluidsim/fluidsim.py | 2 | 4623 | from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
from scipy.misc import imread
import matplotlib
import matplotlib.pyplot as plt
import os
from builtins import range
# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf
def project(vx, vy):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
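    # The divergence of (vx, vy) is computed with periodic (np.roll) finite
    # differences, a pressure-like field p is relaxed toward the solution of
    # the resulting Poisson equation, and its gradient is then subtracted so
    # the returned velocity field is approximately divergence-free.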
p = np.zeros(vx.shape)
h = 1.0/vx.shape[0]
div = -0.5 * h * (np.roll(vx, -1, axis=0) - np.roll(vx, 1, axis=0)
+ np.roll(vy, -1, axis=1) - np.roll(vy, 1, axis=1))
for k in range(10):
p = (div + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0)
+ np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1))/4.0
vx -= 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))/h
vy -= 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))/h
return vx, vy
def advect(f, vx, vy):
"""Move field f according to x and y velocities (u and v)
using an implicit Euler integrator."""
rows, cols = f.shape
cell_ys, cell_xs = np.meshgrid(np.arange(rows), np.arange(cols))
center_xs = (cell_xs - vx).ravel()
center_ys = (cell_ys - vy).ravel()
# Compute indices of source cells.
left_ix = np.floor(center_xs).astype(int)
top_ix = np.floor(center_ys).astype(int)
rw = center_xs - left_ix # Relative weight of right-hand cells.
bw = center_ys - top_ix # Relative weight of bottom cells.
left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation.
right_ix = np.mod(left_ix + 1, rows)
top_ix = np.mod(top_ix, cols)
bot_ix = np.mod(top_ix + 1, cols)
# A linearly-weighted sum of the 4 surrounding cells.
flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
+ rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
return np.reshape(flat_f, (rows, cols))
def simulate(vx, vy, smoke, num_time_steps, ax=None, render=False):
print("Running simulation...")
for t in range(num_time_steps):
if ax: plot_matrix(ax, smoke, t, render)
vx_updated = advect(vx, vx, vy)
vy_updated = advect(vy, vx, vy)
vx, vy = project(vx_updated, vy_updated)
smoke = advect(smoke, vx, vy)
if ax: plot_matrix(ax, smoke, num_time_steps, render)
return smoke
def plot_matrix(ax, mat, t, render=False):
plt.cla()
ax.matshow(mat)
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
if render:
matplotlib.image.imsave('step{0:03d}.png'.format(t), mat)
plt.pause(0.001)
if __name__ == '__main__':
simulation_timesteps = 100
print("Loading initial and target states...")
init_smoke = imread('init_smoke.png')[:,:,0]
#target = imread('peace.png')[::2,::2,3]
target = imread('skull.png')[::2,::2]
rows, cols = target.shape
init_dx_and_dy = np.zeros((2, rows, cols)).ravel()
def distance_from_target_image(smoke):
return np.mean((target - smoke)**2)
def convert_param_vector_to_matrices(params):
vx = np.reshape(params[:(rows*cols)], (rows, cols))
vy = np.reshape(params[(rows*cols):], (rows, cols))
return vx, vy
def objective(params):
init_vx, init_vy = convert_param_vector_to_matrices(params)
final_smoke = simulate(init_vx, init_vy, init_smoke, simulation_timesteps)
return distance_from_target_image(final_smoke)
# Specify gradient of objective function using autograd.
objective_with_grad = value_and_grad(objective)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, frameon=False)
def callback(params):
init_vx, init_vy = convert_param_vector_to_matrices(params)
simulate(init_vx, init_vy, init_smoke, simulation_timesteps, ax)
print("Optimizing initial conditions...")
result = minimize(objective_with_grad, init_dx_and_dy, jac=True, method='CG',
options={'maxiter':25, 'disp':True}, callback=callback)
print("Rendering optimized flow...")
init_vx, init_vy = convert_param_vector_to_matrices(result.x)
simulate(init_vx, init_vy, init_smoke, simulation_timesteps, ax, render=True)
print("Converting frames to an animated GIF...")
os.system("convert -delay 5 -loop 0 step*.png"
" -delay 250 step100.png surprise.gif") # Using imagemagick.
os.system("rm step*.png")
| mit |
jenshnielsen/basemap | examples/maskoceans.py | 4 | 1922 | from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
| gpl-2.0 |
Juanlu001/pfc | demo/plot_h.py | 1 | 6084 | #******************************************************************************
# *
# * ** * * * * *
# * * * * * * * * * *
# ***** * * * * ***** ** *** * * ** *** *** *
# * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * *
# * * ** * ** * * *** *** *** ** *** * * *
# * * * *
# ** * * *
# *
#******************************************************************************
# *
# This file is part of AQUAgpusph, a free CFD program based on SPH. *
# Copyright (C) 2012 Jose Luis Cercos Pita <[email protected]> *
# *
# AQUAgpusph is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# AQUAgpusph is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with AQUAgpusph. If not, see <http://www.gnu.org/licenses/>. *
# *
#******************************************************************************
import sys
import os
from os import path
import numpy as np
try:
from PyQt4 import QtGui
except:
try:
from PySide import QtGui
except:
raise ImportError("PyQt4 or PySide is required to use this tool")
try:
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
except:
raise ImportError("matplotlib is required to use this tool")
class FigureController(FigureCanvas):
"""Matplotlib figure widget controller"""
def __init__(self):
"""Constructor"""
# Create the figure in the canvas
self.fig = Figure()
self.ax11 = self.fig.add_subplot(221)
self.ax21 = self.fig.add_subplot(222)
self.ax12 = self.fig.add_subplot(223)
self.ax22 = self.fig.add_subplot(224)
self.ax = (self.ax11, self.ax21, self.ax12, self.ax22)
FigureCanvas.__init__(self, self.fig)
FNAME = path.join('test_case_2_exp_data.dat')
# For some reason the input file is bad sortened
T,_,_,_,_,_,_,_,_,H3,H2,H1,H4, = self.readFile(FNAME)
exp_t = T
exp_h = (H1, H2, H3, H4)
titles = ('H1', 'H2', 'H3', 'H4')
self.lines = []
for i in range(len(self.ax)):
ax = self.ax[i]
t = [0.0]
h = [0.0]
line, = ax.plot(t,
h,
label=r'$H_{SPH}$',
color="black",
linewidth=1.0)
self.lines.append(line)
ax.plot(exp_t,
exp_h[i],
label=r'$H_{Exp}$',
color="red",
linewidth=1.0)
# Set some options
ax.grid()
ax.legend(loc='best')
ax.set_title(titles[i])
ax.set_xlim(0, 6)
ax.set_ylim(0.0, 0.6)
ax.set_autoscale_on(False)
ax.set_xlabel(r"$t \, [\mathrm{s}]$", fontsize=21)
ax.set_ylabel(r"$H \, [\mathrm{m}]$", fontsize=21)
# force the figure redraw
self.fig.canvas.draw()
# call the update method (to speed-up visualization)
self.timerEvent(None)
# start timer, trigger event every 1000 millisecs (=1sec)
self.timer = self.startTimer(1000)
def readFile(self, filepath):
""" Read and extract data from a file
:param filepath File ot read
"""
abspath = filepath
if not path.isabs(filepath):
abspath = path.join(path.dirname(path.abspath(__file__)), filepath)
# Read the file by lines
f = open(abspath, "r")
lines = f.readlines()
f.close()
data = []
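        # Collapse runs of spaces so split(' ') yields clean numeric fields;
        # lines whose fields cannot be converted to float (e.g. headers) are
        # skipped by the except clause below.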
for l in lines[1:]:
l = l.strip()
while l.find(' ') != -1:
l = l.replace(' ', ' ')
fields = l.split(' ')
try:
data.append(map(float, fields))
except:
continue
# Transpose the data
return map(list, zip(*data))
def timerEvent(self, evt):
"""Custom timerEvent code, called at timer event receive"""
# Read and plot the new data
data = self.readFile('sensors_h.out')
t = data[0]
hh = (data[-4], data[-3], data[-2], data[-1])
for i in range(len(hh)):
h = hh[i]
self.lines[i].set_data(t, h)
# Redraw
self.fig.canvas.draw()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
widget = FigureController()
widget.setWindowTitle("Wave height")
widget.show()
sys.exit(app.exec_())
| gpl-3.0 |
wogsland/QSTK | build/lib.linux-x86_64-2.7/QSTK/qstkstudy/Events.py | 5 | 1878 | # (c) 2011, 2012 Georgia Tech Research Corporation
# This source code is released under the New BSD license. Please see
# http://wiki.quantsoftware.org/index.php?title=QSTK_License
# for license details.
#Created on October <day>, 2011
#
#@author: Vishal Shekhar
#@contact: [email protected]
#@summary: Example Event Datamatrix acceptable to EventProfiler App
#
import pandas
from QSTK.qstkutil import DataAccess as da
import numpy as np
import math
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit(positively confirms the event occurrence)
"""
def find_events(symbols, d_data, verbose=False):
# Get the data from the data store
storename = "Yahoo" # get data from our daily prices source
# Available field names: open, close, high, low, close, actual_close, volume
closefield = "close"
volumefield = "volume"
window = 10
if verbose:
print __name__ + " reading data"
close = d_data[closefield]
if verbose:
print __name__ + " finding events"
for symbol in symbols:
close[symbol][close[symbol]>= 1.0] = np.NAN
for i in range(1,len(close[symbol])):
if np.isnan(close[symbol][i-1]) and close[symbol][i] < 1.0 :#(i-1)th was > $1, and (i)th is <$1
close[symbol][i] = 1.0 #overwriting the price by the bit
close[symbol][close[symbol]< 1.0] = np.NAN
return close
| bsd-3-clause |
SitiBanc/1061_NCTU_IOMDS | 1025/Homework 5/HW5_5.py | 1 | 5472 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 26 21:05:37 2017
@author: sitibanc
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# =============================================================================
# Read CSV
# =============================================================================
df = pd.read_csv('TXF20112015.csv', sep=',', header = None) # dataframe (time, close, open, high, low, volume)
TAIEX = df.values # ndarray
tradeday = list(set(TAIEX[:, 0] // 10000))  # trading days (YYYYMMDD)
tradeday.sort()
# =============================================================================
# Strategy 5: Same as Strategy 4, with a 30-point stop-loss added
# =============================================================================
profit0 = np.zeros((len(tradeday),1))
count = 0   # number of entries (trades taken)
for i in range(len(tradeday)):
date = tradeday[i]
idx = np.nonzero(TAIEX[:, 0] // 10000 == date)[0]
idx.sort()
    openning = TAIEX[idx[0], 2]     # opening price of the day
    long_signal = openning + 30     # buy (long) signal level
    short_signal = openning - 30    # sell (short) signal level
    # time points that trigger the buy signal
    idx2 = np.nonzero(TAIEX[idx, 3] >= long_signal)[0]      # buy points
    # set the stop-loss for the long entry
    if len(idx2) > 0:
        # data from the first buy signal onward (the signal bar itself is included, so index = 0 cannot be used for the stop-loss)
        tmp2 = TAIEX[idx[idx2[0]]:idx[-1], :]
        idx2_stop = np.nonzero(tmp2[:, 4] <= openning)[0]
    # time points that trigger the sell signal
    idx3 = np.nonzero(TAIEX[idx, 4] <= short_signal)[0]     # sell points
    # set the stop-loss for the short entry
    if len(idx3) > 0:
        # data from the first sell signal onward (the signal bar itself is included, so index = 0 cannot be used for the stop-loss)
        tmp3 = TAIEX[idx[idx3[0]]:idx[-1], :]
        idx3_stop = np.nonzero(tmp3[:, 3] >= openning)[0]
    if len(idx2) == 0 and len(idx3) == 0:   # neither signal was hit that day (no entry)
        p1 = 0
        p2 = 0
    elif len(idx3) == 0:    # only the buy signal appeared (enter long)
        p1 = TAIEX[idx[idx2[0]], 1]         # buy at the close of the first buy point
        if len(idx2_stop) > 1:              # stop-loss was hit
            p2 = tmp2[idx2_stop[1], 1]      # sell at the close when the first usable stop-loss point (index = 1) appears
        else:
            p2 = TAIEX[idx[-1], 1]          # sell at the day's closing price
        count += 1
    elif len(idx2) == 0:    # only the sell signal appeared (enter short)
        p2 = TAIEX[idx[idx3[0]], 1]         # sell at the close of the first sell point
        if len(idx3_stop) > 1:              # stop-loss was hit
            p1 = tmp3[idx3_stop[1], 1]      # buy back at the close when the stop-loss point appears
        else:
            p1 = TAIEX[idx[-1], 1]          # buy back at the day's closing price
        count += 1
    elif idx2[0] > idx3[0]: # the sell signal appeared first that day (enter short)
        p2 = TAIEX[idx[idx3[0]], 1]         # sell at the close of the first sell point
        if len(idx3_stop) > 1:              # stop-loss was hit
            p1 = tmp3[idx3_stop[1], 1]      # buy back at the close when the stop-loss point appears
        else:
            p1 = TAIEX[idx[-1], 1]          # buy back at the day's closing price
        count += 1
    else:                   # the buy signal appeared first that day (enter long)
        p1 = TAIEX[idx[idx2[0]], 1]         # buy at the close of the first buy point
        if len(idx2_stop) > 1:              # stop-loss was hit
            p2 = tmp2[idx2_stop[1], 1]      # sell at the close when the stop-loss point appears
        else:
            p2 = TAIEX[idx[-1], 1]          # sell at the day's closing price
        count += 1
    profit0[i] = p2 - p1
print('Strategy 5: Same as Strategy 4, with a 30-point stop-loss added\nDaily cumulative P&L line chart')
profit02 = np.cumsum(profit0)       # cumulative daily profit/loss
plt.plot(profit02)                  # daily cumulative P&L line chart
plt.show()
print('Daily P&L distribution')
plt.hist(profit0, bins = 100)       # distribution (histogram) of daily P&L
plt.show()
# Compute summary statistics
ans1 = count                        # number of entries
ans2 = profit02[-1]                 # total P&L in points
ans3 = np.sum(profit0 > 0) / ans1 * 100         # win rate
ans4 = np.mean(profit0[profit0 > 0])            # average points gained on winning trades
zero_profit = len(profit0[profit0 <= 0]) - (len(profit0) - ans1)    # entered days without a win (non-positive P&L days minus days with no entry)
ans5 = np.sum(profit0[profit0 < 0]) / zero_profit   # average points lost on losing trades
print('Number of entries:', ans1, '\nTotal P&L (points):', ans2, '\nWin rate:', ans3, '%')
print('Average points gained per winning trade:', ans4, '\nAverage points lost per losing trade:', ans5, '\n')
| apache-2.0 |
Unidata/MetPy | v0.12/_downloads/7b1d8e864fd4783fdaff1a83cdf9c52f/Find_Natural_Neighbors_Verification.py | 6 | 2521 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Find Natural Neighbors Verification
===================================
Finding natural neighbors in a triangulation
A triangle is a natural neighbor of a point if that point is within a circumscribed
circle ("circumcircle") containing the triangle.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from metpy.interpolate.geometry import circumcircle_radius, find_natural_neighbors
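# Illustrative sketch (not part of the original example): a direct check of the
# natural-neighbor criterion above for a single triangle, done with plain NumPy
# instead of MetPy's helpers. The vertex/point values passed in are arbitrary.
def _in_circumcircle(pt, a, b, c):
    ax, ay = a
    bx, by = b
    cx, cy = c
    # Circumcenter from the standard determinant formula
    d = 2.0 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
    ux = ((ax**2 + ay**2) * (by - cy) + (bx**2 + by**2) * (cy - ay)
          + (cx**2 + cy**2) * (ay - by)) / d
    uy = ((ax**2 + ay**2) * (cx - bx) + (bx**2 + by**2) * (ax - cx)
          + (cx**2 + cy**2) * (bx - ax)) / d
    radius = np.hypot(ax - ux, ay - uy)
    return np.hypot(pt[0] - ux, pt[1] - uy) < radius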
# Create test observations, test points, and plot the triangulation and points.
gx, gy = np.meshgrid(np.arange(0, 20, 4), np.arange(0, 20, 4))
pts = np.vstack([gx.ravel(), gy.ravel()]).T
tri = Delaunay(pts)
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
test_points = np.array([[2, 2], [5, 10], [12, 13.4], [12, 8], [20, 20]])
for i, (x, y) in enumerate(test_points):
ax.plot(x, y, 'k.', markersize=6)
ax.annotate('test ' + str(i), xy=(x, y))
###########################################
# Since finding natural neighbors already calculates circumcenters, return
# that information for later use.
#
# The key of the neighbors dictionary refers to the test point index, and the list of integers
# are the triangles that are natural neighbors of that particular test point.
#
# Since point 4 is far away from the triangulation, it has no natural neighbors.
# Point 3 is at the confluence of several triangles so it has many natural neighbors.
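# (Schematically, the printed mapping has the form {test_point_index: [triangle
# indices]}, e.g. an isolated point maps to an empty list; the exact indices
# depend on the triangulation and are shown by the print call below.)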
neighbors, circumcenters = find_natural_neighbors(tri, test_points)
print(neighbors)
###########################################
# We can plot all of the triangles as well as the circles representing the circumcircles
#
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
# Using circumcenters and calculated circumradii, plot the circumcircles
for idx, cc in enumerate(circumcenters):
ax.plot(cc[0], cc[1], 'k.', markersize=5)
circ = plt.Circle(cc, circumcircle_radius(*tri.points[tri.simplices[idx]]),
edgecolor='k', facecolor='none', transform=fig.axes[0].transData)
ax.add_artist(circ)
ax.set_aspect('equal', 'datalim')
plt.show()
| bsd-3-clause |
mxjl620/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
pfnet/chainer | examples/wavenet/train.py | 6 | 5955 | import argparse
import os
import pathlib
import warnings
import numpy
import chainer
from chainer.training import extensions
import chainerx
from net import EncoderDecoderModel
from net import UpsampleNet
from net import WaveNet
from utils import Preprocess
import matplotlib
matplotlib.use('Agg')
parser = argparse.ArgumentParser(description='Chainer example: WaveNet')
parser.add_argument('--batchsize', '-b', type=int, default=4,
                    help='Number of audio clips in each mini-batch')
parser.add_argument('--length', '-l', type=int, default=7680,
help='Number of samples in each audio clip')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--dataset', '-i', default='./VCTK-Corpus',
help='Directory of dataset')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--n_loop', type=int, default=4,
help='Number of residual blocks')
parser.add_argument('--n_layer', type=int, default=10,
help='Number of layers in each residual block')
parser.add_argument('--a_channels', type=int, default=256,
help='Number of channels in the output layers')
parser.add_argument('--r_channels', type=int, default=64,
help='Number of channels in residual layers and embedding')
parser.add_argument('--s_channels', type=int, default=256,
help='Number of channels in the skip layers')
parser.add_argument('--use_embed_tanh', type=bool, default=True,
help='Use tanh after an initial 2x1 convolution')
parser.add_argument('--seed', type=int, default=0,
help='Random seed to split dataset into train and test')
parser.add_argument('--snapshot_interval', type=int, default=10000,
help='Interval of snapshot')
parser.add_argument('--display_interval', type=int, default=100,
help='Interval of displaying log to console')
parser.add_argument('--process', type=int, default=1,
help='Number of parallel processes')
parser.add_argument('--prefetch', type=int, default=8,
help='Number of prefetch samples')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
print('GPU: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
if device.xp is chainer.backends.cuda.cupy:
chainer.global_config.autotune = True
# Datasets
if not os.path.isdir(args.dataset):
raise RuntimeError('Dataset directory not found: {}'.format(args.dataset))
paths = sorted([
str(path) for path in pathlib.Path(args.dataset).glob('wav48/*/*.wav')])
preprocess = Preprocess(
sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
length=args.length, quantize=args.a_channels)
dataset = chainer.datasets.TransformDataset(paths, preprocess)
train, valid = chainer.datasets.split_dataset_random(
dataset, int(len(dataset) * 0.9), args.seed)
# Networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
args.n_loop, args.n_layer,
args.a_channels, args.r_channels, args.s_channels,
args.use_embed_tanh)
model = chainer.links.Classifier(EncoderDecoderModel(encoder, decoder))
# Optimizer
optimizer = chainer.optimizers.Adam(1e-4)
optimizer.setup(model)
# Iterators
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize,
n_processes=args.process, n_prefetch=args.prefetch)
valid_iter = chainer.iterators.MultiprocessIterator(
valid, args.batchsize, repeat=False, shuffle=False,
n_processes=args.process, n_prefetch=args.prefetch)
# Updater and Trainer
updater = chainer.training.StandardUpdater(
train_iter, optimizer, device=device)
trainer = chainer.training.Trainer(
updater, (args.epoch, 'epoch'), out=args.out)
# Extensions
snapshot_interval = (args.snapshot_interval, 'iteration')
display_interval = (args.display_interval, 'iteration')
trainer.extend(extensions.Evaluator(valid_iter, model, device=device))
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=snapshot_interval)
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'main/loss', 'main/accuracy',
'validation/main/loss', 'validation/main/accuracy']),
trigger=display_interval)
trainer.extend(extensions.PlotReport(
['main/loss', 'validation/main/loss'],
'iteration', file_name='loss.png', trigger=display_interval))
trainer.extend(extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'iteration', file_name='accuracy.png', trigger=display_interval))
trainer.extend(extensions.ProgressBar(update_interval=10))
# Resume
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
# Run
trainer.run()
| mit |
JFriel/honours_project | networkx/build/lib/networkx/convert_matrix.py | 10 | 33329 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None, optional
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float, optional
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
df : Pandas DataFrame
Graph adjacency matrix
Notes
-----
The DataFrame entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the 'multigraph_weight' parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Pandas DataFrame can be modified as follows:
>>> import pandas as pd
>>> import numpy as np
>>> G = nx.Graph([(1,1)])
>>> df = nx.to_pandas_dataframe(G)
>>> df
1
1 1
>>> df.values[np.diag_indices_from(df)] *= 2
>>> df
1
1 2
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2])
0 1 2
0 0 2 0
1 1 0 0
2 0 0 4
"""
import pandas as pd
M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge)
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
df = pd.DataFrame(data=M, index = nodelist ,columns = nodelist)
return df
def from_pandas_dataframe(df, source, target, edge_attr=None,
create_using=None):
"""Return a graph from Pandas DataFrame.
The Pandas DataFrame should contain at least two columns of node names and
zero or more columns of node attributes. Each row will be processed as one
edge instance.
Note: This function iterates over DataFrame.values, which is not
guaranteed to retain the data type across columns in the row. This is only
a problem if your row is entirely numeric and a mix of ints and floats. In
that case, all values will be returned as floats. See the
DataFrame.iterrows documentation for an example.
Parameters
----------
df : Pandas DataFrame
An edge list representation of a graph
source : str or int
        A valid column name (string or integer) for the source nodes (for the
directed case).
target : str or int
        A valid column name (string or integer) for the target nodes (for the
directed case).
edge_attr : str or int, iterable, True
A valid column name (str or integer) or list of column names that will
be used to retrieve items from the row and add them to the graph as edge
attributes. If `True`, all of the remaining columns will be added.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
See Also
--------
to_pandas_dataframe
Examples
--------
Simple integer weights on edges:
>>> import pandas as pd
>>> import numpy as np
>>> r = np.random.RandomState(seed=5)
>>> ints = r.random_integers(1, 10, size=(3,2))
>>> a = ['A', 'B', 'C']
>>> b = ['D', 'A', 'E']
>>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
>>> df[0] = a
>>> df['b'] = b
>>> df
weight cost 0 b
0 4 7 A D
1 7 1 B A
2 10 9 C E
>>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
>>> G['E']['C']['weight']
10
>>> G['E']['C']['cost']
9
"""
g = _prep_create_using(create_using)
# Index of source and target
src_i = df.columns.get_loc(source)
tar_i = df.columns.get_loc(target)
if edge_attr:
# If all additional columns requested, build up a list of tuples
# [(name, index),...]
if edge_attr is True:
# Create a list of all columns indices, ignore nodes
edge_i = []
for i, col in enumerate(df.columns):
if col is not source and col is not target:
edge_i.append((col, i))
# If a list or tuple of name is requested
elif isinstance(edge_attr, (list, tuple)):
edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
# If a string or int is passed
else:
edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),]
# Iteration on values returns the rows as Numpy arrays
for row in df.values:
g.add_edge(row[src_i], row[tar_i], {i:row[j] for i, j in edge_i})
# If no column names are given, then just return the edges.
else:
for row in df.values:
g.add_edge(row[src_i], row[tar_i])
return g
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in ``nodelist``.
If ``nodelist`` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default = 'weight')
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float (default = 0.0)
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
M : NumPy matrix
Graph adjacency matrix
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the ``multigraph_weight`` parameter. The default is to
sum the weight attributes for each of the parallel edges.
When ``nodelist`` does not contain every node in ``G``, the matrix is built
from the subgraph of ``G`` that is induced by the nodes in ``nodelist``.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Numpy matrix can be modified as follows:
>>> import numpy as np
>>> G = nx.Graph([(1, 1)])
>>> A = nx.to_numpy_matrix(G)
>>> A
matrix([[ 1.]])
>>> A.A[np.diag_indices_from(A)] *= 2
>>> A
matrix([[ 2.]])
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
# Initially, we start with an array of nans. Then we populate the matrix
# using data from the graph. Afterwards, any leftover nans will be
# converted to the value of `nonedge`. Note, we use nans initially,
# instead of zero, for two reasons:
#
# 1) It can be important to distinguish a real edge with the value 0
# from a nonedge with the value 0.
#
# 2) When working with multi(di)graphs, we must combine the values of all
# edges between any two nodes in some manner. This often takes the
# form of a sum, min, or max. Using the value 0 for a nonedge would
# have undesirable effects with min and max, but using nanmin and
# nanmax with initially nan values is not problematic at all.
#
# That said, there are still some drawbacks to this approach. Namely, if
# a real edge is nan, then that value is a) not distinguishable from
# nonedges and b) is ignored by the default combinator (nansum, nanmin,
# nanmax) functions used for multi(di)graphs. If this becomes an issue,
# an alternative approach is to use masked arrays. Initially, every
# element is masked and set to some `initial` value. As we populate the
# graph, elements are unmasked (automatically) when we combine the initial
# value with the values given by real edges. At the end, we convert all
# masked values to `nonedge`. Using masked arrays fully addresses reason 1,
# but for reason 2, we would still have the issue with min and max if the
# initial values were 0.0. Note: an initial value of +inf is appropriate
# for min, while an initial value of -inf is appropriate for max. When
# working with sum, an initial value of zero is appropriate. Ideally then,
# we'd want to allow users to specify both a value for nonedges and also
# an initial value. For multi(di)graphs, the choice of the initial value
# will, in general, depend on the combinator function---sensible defaults
# can be provided.
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
except:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i, j = index[u], index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight, M[i,j]])
if undirected:
M[j,i] = M[i,j]
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]] = d.get(weight,1)
except KeyError:
# This occurs when there are fewer desired nodes than
# there are nodes in the graph: len(nodelist) < len(G)
pass
M[np.isnan(M)] = nonedge
M = np.asmatrix(M)
return M
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If ``create_using`` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the
entries of ``A`` are of type ``int``, then this function returns a multigraph
(of the same type as ``create_using``) with parallel edges.
If ``create_using`` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1, 1], [2, 1]])
>>> G=nx.from_numpy_matrix(A)
If ``create_using`` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If ``create_using`` is a multigraph and the matrix has only integer entries
but ``parallel_edges`` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> temp = nx.MultiGraph()
>>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
User defined compound data type on edges:
>>> import numpy
>>> dt = [('weight', float), ('cost', int)]
>>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
>>> G = nx.from_numpy_matrix(A)
>>> G.edges()
[(0, 0)]
>>> G[0][0]['cost']
2
>>> G[0][0]['weight']
1.0
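    A directed graph can be requested in the same way; as an illustrative
    sketch, passing a DiGraph as ``create_using`` keeps the edge direction:
    >>> import numpy
    >>> A = numpy.matrix([[0, 2], [0, 0]])
    >>> G = nx.from_numpy_matrix(A, create_using = nx.DiGraph())
    >>> G.edges(data=True)
    [(0, 1, {'weight': 2})]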
"""
# This should never fail if you have created a numpy matrix with numpy...
import numpy as np
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
except:
raise TypeError("Unknown numpy data type: %s"%dt)
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Get a list of all the entries in the matrix with nonzero entries. These
# coordinates will become the edges in the graph.
edges = zip(*(np.asarray(A).nonzero()))
# handle numpy constructed data type
    if python_type == 'void':
# Sort the fields by their offset, then by dtype, then by name.
fields = sorted((offset, dtype, name) for name, (dtype, offset) in
A.dtype.fields.items())
triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
for (_, dtype, name), val in zip(fields, A[u, v])})
for u, v in edges)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
elif python_type is int and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
for (u, v) in edges)
else: # basic data type
triples = ((u, v, dict(weight=python_type(A[u, v])))
for u, v in edges)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generated_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when ``G.add_edges_from()`` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_edges_from(triples)
return G
@not_implemented_for('multigraph')
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
    weight : string or None, optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Uses coo_matrix format. To convert to other formats specify the
format= keyword.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Scipy sparse matrix can be modified as follows:
>>> import scipy as sp
>>> G = nx.Graph([(1,1)])
>>> A = nx.to_scipy_sparse_matrix(G)
>>> print(A.todense())
[[1]]
>>> A.setdiag(A.diagonal()*2)
>>> print(A.todense())
[[2]]
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
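    As a further sketch, passing ``weight=None`` ignores the weight attribute
    and simply counts the (parallel) edges:
    >>> S2 = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2], weight=None)
    >>> print(S2.todense())
    [[0 1 0]
     [1 0 0]
     [0 0 2]]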
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
if nodelist is None:
nodelist = G
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges_iter(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
d = data + data
r = row + col
c = col + row
# selfloop entries get double counted when symmetrizing
# so we subtract the data on the diagonal
selfloops = G.selfloop_edges(data=True)
if selfloops:
diag_index,diag_data = zip(*((index[u],-d.get(weight,1))
for u,v,d in selfloops
if u in index and v in index))
d += diag_data
r += diag_index
c += diag_index
M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def _csr_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Row** format to
an iterable of weighted edge triples.
"""
nrows = A.shape[0]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(nrows):
for j in range(indptr[i], indptr[i+1]):
yield i, indices[j], data[j]
def _csc_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Column** format to
an iterable of weighted edge triples.
"""
ncols = A.shape[1]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(ncols):
for j in range(indptr[i], indptr[i+1]):
yield indices[j], i, data[j]
def _coo_gen_triples(A):
"""Converts a SciPy sparse matrix in **Coordinate** format to an iterable
of weighted edge triples.
"""
row, col, data = A.row, A.col, A.data
return zip(row, col, data)
def _dok_gen_triples(A):
"""Converts a SciPy sparse matrix in **Dictionary of Keys** format to an
iterable of weighted edge triples.
"""
for (r, c), v in A.items():
yield r, c, v
def _generate_weighted_edges(A):
"""Returns an iterable over (u, v, w) triples, where u and v are adjacent
vertices and w is the weight of the edge joining u and v.
`A` is a SciPy sparse matrix (in any format).
"""
if A.format == 'csr':
return _csr_gen_triples(A)
if A.format == 'csc':
return _csc_gen_triples(A)
if A.format == 'dok':
return _dok_gen_triples(A)
# If A is in any other format (including COO), convert it to COO format.
return _coo_gen_triples(A.tocoo())
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
edge_attribute='weight'):
"""Creates a new graph from an adjacency matrix given as a SciPy sparse
matrix.
Parameters
----------
A: scipy sparse matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the
entries of `A` are of type ``int``, then this function returns a multigraph
(of the same type as `create_using`) with parallel edges. In this case,
`edge_attribute` will be ignored.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
Examples
--------
>>> import scipy.sparse
>>> A = scipy.sparse.eye(2,2,1)
>>> G = nx.from_scipy_sparse_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
... create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
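    The ``edge_attribute`` keyword stores the matrix values under a different
    attribute name (an illustrative sketch):
    >>> import scipy.sparse
    >>> A = scipy.sparse.csr_matrix([[0, 3], [3, 0]])
    >>> G = nx.from_scipy_sparse_matrix(A, edge_attribute='capacity')
    >>> G[0][1]
    {'capacity': 3}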
"""
G = _prep_create_using(create_using)
n,m = A.shape
if n != m:
raise nx.NetworkXError(\
"Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = _generate_weighted_edges(A)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generated_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_weighted_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
try:
import pandas
except:
raise SkipTest("Pandas not available")
| gpl-3.0 |
shahankhatch/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a Python dict) to map the most
    frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: as they are only based
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random initializations
might be necessary to get good convergence.
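For example, one possible invocation of this script is (illustrative only; the
flags are defined by the option parser below):
  python document_clustering.py --lsa 100 --no-minibatch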
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
mganeva/mantid | Framework/PythonInterface/mantid/plots/modest_image/modest_image.py | 1 | 10141 | # v0.2 obtained on March 12, 2019
"""
Modification of Chris Beaumont's mpl-modest-image package to allow the use of
set_extent.
"""
from __future__ import print_function, division
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.image as mi
import matplotlib.colors as mcolors
import matplotlib.cbook as cbook
from matplotlib.transforms import IdentityTransform, Affine2D
import numpy as np
IDENTITY_TRANSFORM = IdentityTransform()
class ModestImage(mi.AxesImage):
"""
Computationally modest image class.
ModestImage is an extension of the Matplotlib AxesImage class
better suited for the interactive display of larger images. Before
drawing, ModestImage resamples the data array based on the screen
    resolution and view window. This has very little effect on the
appearance of the image, but can substantially cut down on
computation since calculations of unresolved or clipped pixels
are skipped.
    The interface of ModestImage is the same as AxesImage. Unlike the
    upstream mpl-modest-image class, this modified version does support
    setting the 'extent' property. There may also be weird coordinate
    warping operations for images that I'm not aware of. Don't expect
    those to work either.
"""
def __init__(self, *args, **kwargs):
self._full_res = None
self._full_extent = kwargs.get('extent', None)
super(ModestImage, self).__init__(*args, **kwargs)
self.invalidate_cache()
def set_data(self, A):
"""
Set the image array
ACCEPTS: numpy/PIL Image A
"""
self._full_res = A
self._A = A
if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype,
np.float):
raise TypeError("Image data can not convert to float")
if (self._A.ndim not in (2, 3) or
(self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
raise TypeError("Invalid dimensions for image data")
self.invalidate_cache()
def invalidate_cache(self):
self._bounds = None
self._imcache = None
self._rgbacache = None
self._oldxslice = None
self._oldyslice = None
self._sx, self._sy = None, None
self._pixel2world_cache = None
self._world2pixel_cache = None
def set_extent(self, extent):
self._full_extent = extent
self.invalidate_cache()
mi.AxesImage.set_extent(self, extent)
def get_array(self):
"""Override to return the full-resolution array"""
return self._full_res
@property
def _pixel2world(self):
if self._pixel2world_cache is None:
# Pre-compute affine transforms to convert between the 'world'
# coordinates of the axes (what is shown by the axis labels) to
# 'pixel' coordinates in the underlying array.
extent = self._full_extent
if extent is None:
self._pixel2world_cache = IDENTITY_TRANSFORM
else:
self._pixel2world_cache = Affine2D()
self._pixel2world.translate(+0.5, +0.5)
self._pixel2world.scale((extent[1] - extent[0]) / self._full_res.shape[1],
(extent[3] - extent[2]) / self._full_res.shape[0])
self._pixel2world.translate(extent[0], extent[2])
self._world2pixel_cache = None
return self._pixel2world_cache
@property
def _world2pixel(self):
if self._world2pixel_cache is None:
self._world2pixel_cache = self._pixel2world.inverted()
return self._world2pixel_cache
def _scale_to_res(self):
"""
Change self._A and _extent to render an image whose resolution is
matched to the eventual rendering.
"""
# Find out how we need to slice the array to make sure we match the
# resolution of the display. We pass self._world2pixel which matters
# for cases where the extent has been set.
x0, x1, sx, y0, y1, sy = extract_matched_slices(axes=self.axes,
shape=self._full_res.shape,
transform=self._world2pixel)
# Check whether we've already calculated what we need, and if so just
# return without doing anything further.
if (self._bounds is not None and
sx >= self._sx and sy >= self._sy and
x0 >= self._bounds[0] and x1 <= self._bounds[1] and
y0 >= self._bounds[2] and y1 <= self._bounds[3]):
return
# Slice the array using the slices determined previously to optimally
# match the display
self._A = self._full_res[y0:y1:sy, x0:x1:sx]
self._A = cbook.safe_masked_invalid(self._A)
# We now determine the extent of the subset of the image, by determining
# it first in pixel space, and converting it to the 'world' coordinates.
# See https://github.com/matplotlib/matplotlib/issues/8693 for a
# demonstration of why origin='upper' and extent=None needs to be
# special-cased.
if self.origin == 'upper' and self._full_extent is None:
xmin, xmax, ymin, ymax = x0 - .5, x1 - .5, y1 - .5, y0 - .5
else:
xmin, xmax, ymin, ymax = x0 - .5, x1 - .5, y0 - .5, y1 - .5
xmin, ymin, xmax, ymax = self._pixel2world.transform([(xmin, ymin), (xmax, ymax)]).ravel()
mi.AxesImage.set_extent(self, [xmin, xmax, ymin, ymax])
# self.set_extent([xmin, xmax, ymin, ymax])
# Finally, we cache the current settings to avoid re-computing similar
# arrays in future.
self._sx = sx
self._sy = sy
self._bounds = (x0, x1, y0, y1)
self.changed()
def draw(self, renderer, *args, **kwargs):
        if self._full_res is None:
return
self._scale_to_res()
super(ModestImage, self).draw(renderer, *args, **kwargs)
def main():
from time import time
import matplotlib.pyplot as plt
x, y = np.mgrid[0:2000, 0:2000]
data = np.sin(x / 10.) * np.cos(y / 30.)
f = plt.figure()
ax = f.add_subplot(111)
# try switching between
artist = ModestImage(ax, data=data)
ax.set_aspect('equal')
artist.norm.vmin = -1
artist.norm.vmax = 1
ax.add_artist(artist)
t0 = time()
plt.gcf().canvas.draw()
t1 = time()
print("Draw time for %s: %0.1f ms" % (artist.__class__.__name__,
(t1 - t0) * 1000))
plt.show()
def imshow(axes, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=None, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""Similar to matplotlib's imshow command, but produces a ModestImage
    Unlike the matplotlib version, the target axes must be specified explicitly.
"""
if not axes._hold:
axes.cla()
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
if aspect is None:
aspect = rcParams['image.aspect']
axes.set_aspect(aspect)
im = ModestImage(axes, cmap=cmap, norm=norm, interpolation=interpolation,
origin=origin, extent=extent, filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
axes._set_artist_props(im)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(axes.patch)
# if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
elif norm is None:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
axes.images.append(im)
im._remove_method = lambda h: axes.images.remove(h)
return im
def extract_matched_slices(axes=None, shape=None, extent=None,
transform=IDENTITY_TRANSFORM):
"""Determine the slice parameters to use, matched to the screen.
    :param axes: Axes object to query. Its extent and pixel size
determine the slice parameters
:param shape: Tuple of the full image shape to slice into. Upper
boundaries for slices will be cropped to fit within
this shape.
    :rtype: tuple of x0, x1, sx, y0, y1, sy
Indexing the full resolution array as array[y0:y1:sy, x0:x1:sx] returns
a view well-matched to the axes' resolution and extent
"""
# Find extent in display pixels (this gives the resolution we need
# to sample the array to)
ext = (axes.transAxes.transform([(1, 1)]) - axes.transAxes.transform([(0, 0)]))[0]
# Find the extent of the axes in 'world' coordinates
xlim, ylim = axes.get_xlim(), axes.get_ylim()
# Transform the limits to pixel coordinates
ind0 = transform.transform([min(xlim), min(ylim)])
ind1 = transform.transform([max(xlim), max(ylim)])
def _clip(val, lo, hi):
return int(max(min(val, hi), lo))
# Determine the range of pixels to extract from the array, including a 5
# pixel margin all around. We ensure that the shape of the resulting array
# will always be at least (1, 1) even if there is really no overlap, to
# avoid issues.
y0 = _clip(ind0[1] - 5, 0, shape[0] - 1)
y1 = _clip(ind1[1] + 5, 1, shape[0])
x0 = _clip(ind0[0] - 5, 0, shape[1] - 1)
x1 = _clip(ind1[0] + 5, 1, shape[1])
# Determine the strides that can be used when extracting the array
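    # The stride is roughly one full-resolution element per screen pixel, but
    # it is capped so that at least ~5 samples remain along each axis of the
    # extracted view, and it is never smaller than 1.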
sy = int(max(1, min((y1 - y0) / 5., np.ceil(abs((ind1[1] - ind0[1]) / ext[1])))))
sx = int(max(1, min((x1 - x0) / 5., np.ceil(abs((ind1[0] - ind0[0]) / ext[0])))))
return x0, x1, sx, y0, y1, sy
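# A minimal usage sketch for the module-level imshow() helper above
# (illustrative only; assumes an interactive Matplotlib backend and a
# 2-D NumPy array):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     data = np.random.rand(4000, 4000)
#     im = imshow(ax, data, vmin=0.0, vmax=1.0, extent=(0, 1, 0, 1))
#     plt.show()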
if __name__ == "__main__":
main()
| gpl-3.0 |
xebitstudios/Kayak | examples/poisson_glm.py | 3 | 1224 | import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
import kayak
N = 10000
D = 5
P = 1
learn = 0.00001
batch_size = 500
# Random inputs.
X = npr.randn(N,D)
true_W = npr.randn(D,P)
lam = np.exp(np.dot(X, true_W))
Y = npr.poisson(lam)
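# The synthetic targets above follow a Poisson GLM with a log link:
# y ~ Poisson(exp(np.dot(X, true_W))); the model below tries to recover true_W.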
kyk_batcher = kayak.Batcher(batch_size, N)
# Build network.
kyk_inputs = kayak.Inputs(X, kyk_batcher)
# Labels.
kyk_targets = kayak.Targets(Y, kyk_batcher)
# Weights.
W = 0.01*npr.randn(D,P)
kyk_W = kayak.Parameter(W)
# Linear layer.
kyk_activation = kayak.MatMult( kyk_inputs, kyk_W)
# Exponential inverse-link function.
kyk_lam = kayak.ElemExp(kyk_activation)
# Poisson negative log likelihood.
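# (up to the constant log(y!) term: NLL_i = lam_i - y_i * log(lam_i) + log(y_i!))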
kyk_nll = kyk_lam - kayak.ElemLog(kyk_lam) * kyk_targets
# Sum the losses.
kyk_loss = kayak.MatSum( kyk_nll )
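# Plain minibatch gradient descent with a fixed learning rate; each outer
# iteration makes one pass over all minibatches provided by kyk_batcher.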
for ii in xrange(100):
for batch in kyk_batcher:
loss = kyk_loss.value
print loss, np.sum((kyk_W.value - true_W)**2)
grad = kyk_loss.grad(kyk_W)
kyk_W.value -= learn * grad
# Plot the true and inferred rate for a subset of data.
T_slice = slice(0,100)
kyk_inputs.value = X[T_slice,:]
plt.figure()
plt.plot(lam[T_slice], 'k')
plt.plot(kyk_lam.value, '--r')
plt.show() | mit |
rohanp/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 23 | 45330 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fit k-neighbors classifiers/regressors on the full dataset and check
    # that the training targets are recovered (almost) exactly.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity', include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity', include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity', include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if cls in (neighbors.KNeighborsClassifier,
                   neighbors.KNeighborsRegressor):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
DistrictDataLabs/django-data-product | irisfinder/views.py | 1 | 1948 | from django.shortcuts import render
import datetime
from models import Iris, SVMModels
from forms import UserIrisData
import sklearn
from sklearn import svm
from sklearn.cross_validation import train_test_split
import numpy as np
from django.conf import settings
import cPickle
import scipy
from pytz import timezone
import random
# Create your views here.
def predict(request):
data = {
"app_name": "irisfinder",
"random_number": random.randint(0, 10000)
}
if request.method == "GET":
form = UserIrisData()
data.update({"form": form, "submit": True})
elif request.method == "POST":
form = UserIrisData(request.POST)
sepal_length = request.POST.get("sepal_length")
sepal_width = request.POST.get("sepal_width")
petal_length = request.POST.get("petal_length")
petal_width = request.POST.get("petal_width")
if request.POST.get('submit'):
user_data = Iris(user_data=True,
sepal_length=sepal_length,
sepal_width=sepal_width,
petal_length=petal_length,
petal_width=petal_width)
user_data.save()
model_object = SVMModels.objects.order_by("-run_date").first()
model = cPickle.loads(model_object.model_pickle)
            # Cast the raw POST strings to floats before handing them to the model.
            features = [float(sepal_length), float(sepal_width),
                        float(petal_length), float(petal_width)]
            prediction = model.predict(features)
item_pk = user_data.pk
species = prediction[0]
data.update({"form": form, "verify": True, "item_pk": item_pk,
"species": species, "prediction": prediction[0]})
elif request.POST.get('verified'):
user_data = Iris.objects.get(pk=int(request.POST.get("item_pk")))
user_data.species = request.POST.get("species")
user_data.save()
return render(request, "predict.html", context=data) | apache-2.0 |
ndardenne/pymatgen | pymatgen/io/abinit/tasks.py | 2 | 166549 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""This module provides functions and classes related to Task objects."""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import time
import datetime
import shutil
import collections
import abc
import copy
import yaml
import six
import numpy as np
from pprint import pprint
from itertools import product
from six.moves import map, zip, StringIO
from monty.dev import deprecated
from monty.string import is_string, list_strings
from monty.termcolor import colored
from monty.collections import AttrDict
from monty.functools import lazy_property, return_none_if_raise
from monty.json import MSONable
from monty.fnmatch import WildCard
from pymatgen.core.units import Memory
from pymatgen.serializers.json_coders import json_pretty_dump, pmg_serialize
from .utils import File, Directory, irdvars_for_ext, abi_splitext, FilepathFixer, Condition, SparseHistogram
from .qadapters import make_qadapter, QueueAdapter, QueueAdapterError
from . import qutils as qu
from .db import DBConnector
from .nodes import Status, Node, NodeError, NodeResults, NodeCorrections, FileNode, check_spectator
from . import abiinspect
from . import events
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"TaskManager",
"AbinitBuild",
"ParalHintsParser",
"ScfTask",
"NscfTask",
"RelaxTask",
"DdkTask",
"PhononTask",
"SigmaTask",
"OpticTask",
"AnaddbTask",
]
import logging
logger = logging.getLogger(__name__)
# Tools and helper functions.
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
def lennone(PropperOrNone):
if PropperOrNone is None:
return 0
else:
return len(PropperOrNone)
def nmltostring(nml):
"""Convert a dictionary representing a Fortran namelist into a string."""
    if not isinstance(nml, dict):
        raise ValueError("nml should be a dict !")
    curstr = ""
    for key, group in nml.items():
namelist = ["&" + key]
for k, v in group.items():
if isinstance(v, list) or isinstance(v, tuple):
namelist.append(k + " = " + ",".join(map(str, v)) + ",")
elif is_string(v):
namelist.append(k + " = '" + str(v) + "',")
else:
namelist.append(k + " = " + str(v) + ",")
namelist.append("/")
curstr = curstr + "\n".join(namelist) + "\n"
return curstr
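# A minimal usage sketch for nmltostring (the group/variable names below are
# purely illustrative, not taken from a real ABINIT namelist):
#
#   >>> nmltostring({"files": {"prefix": "run"}})
#   "&files\nprefix = 'run',\n/\n"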
class TaskResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
JSON_SCHEMA["properties"] = {
"executable": {"type": "string", "required": True},
}
@classmethod
def from_node(cls, task):
"""Initialize an instance from an :class:`AbinitTask` instance."""
new = super(TaskResults, cls).from_node(task)
new.update(
executable=task.executable,
#executable_version:
#task_events=
pseudos=[p.as_dict() for p in task.input.pseudos],
#input=task.input
)
new.register_gridfs_files(
run_abi=(task.input_file.path, "t"),
run_abo=(task.output_file.path, "t"),
)
return new
class ParalConf(AttrDict):
"""
This object store the parameters associated to one
of the possible parallel configurations reported by ABINIT.
Essentially it is a dictionary whose values can also be accessed
as attributes. It also provides default values for selected keys
that might not be present in the ABINIT dictionary.
Example:
--- !Autoparal
info:
version: 1
autoparal: 1
max_ncpus: 108
configurations:
- tot_ncpus: 2 # Total number of CPUs
mpi_ncpus: 2 # Number of MPI processes.
omp_ncpus: 1 # Number of OMP threads (1 if not present)
mem_per_cpu: 10 # Estimated memory requirement per MPI processor in Megabytes.
efficiency: 0.4 # 1.0 corresponds to an "expected" optimal efficiency (strong scaling).
vars: { # Dictionary with the variables that should be added to the input.
varname1: varvalue1
varname2: varvalue2
}
-
...
For paral_kgb we have:
nproc npkpt npspinor npband npfft bandpp weight
108 1 1 12 9 2 0.25
108 1 1 108 1 2 27.00
96 1 1 24 4 1 1.50
84 1 1 12 7 2 0.25
"""
_DEFAULTS = {
"omp_ncpus": 1,
"mem_per_cpu": 0.0,
"vars": {}
}
def __init__(self, *args, **kwargs):
super(ParalConf, self).__init__(*args, **kwargs)
# Add default values if not already in self.
for k, v in self._DEFAULTS.items():
if k not in self:
self[k] = v
def __str__(self):
stream = StringIO()
pprint(self, stream=stream)
return stream.getvalue()
# TODO: Change name in abinit
# Remove tot_ncpus from Abinit
@property
def num_cores(self):
return self.mpi_procs * self.omp_threads
@property
def mem_per_proc(self):
return self.mem_per_cpu
@property
def mpi_procs(self):
return self.mpi_ncpus
@property
def omp_threads(self):
return self.omp_ncpus
@property
def speedup(self):
"""Estimated speedup reported by ABINIT."""
return self.efficiency * self.num_cores
@property
def tot_mem(self):
"""Estimated total memory in Mbs (computed from mem_per_proc)"""
return self.mem_per_proc * self.mpi_procs
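# A short sketch (hypothetical numbers) of how one autoparal entry maps to a ParalConf.
# Missing keys are filled in from _DEFAULTS (omp_ncpus=1, mem_per_cpu=0.0, vars={}):
#
#   >>> conf = ParalConf(tot_ncpus=2, mpi_ncpus=2, efficiency=0.4, vars={"npband": 2})
#   >>> conf.num_cores, conf.speedup
#   (2, 0.8)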
class ParalHintsError(Exception):
"""Base error class for `ParalHints`."""
class ParalHintsParser(object):
Error = ParalHintsError
def __init__(self):
# Used to push error strings.
self._errors = collections.deque(maxlen=100)
def add_error(self, errmsg):
self._errors.append(errmsg)
def parse(self, filename):
"""
Read the `AutoParal` section (YAML format) from filename.
Assumes the file contains only one section.
"""
with abiinspect.YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag("!Autoparal")
try:
d = yaml.load(doc.text_notag)
return ParalHints(info=d["info"], confs=d["configurations"])
except:
import traceback
sexc = traceback.format_exc()
err_msg = "Wrong YAML doc:\n%s\n\nException:\n%s" % (doc.text, sexc)
self.add_error(err_msg)
logger.critical(err_msg)
raise self.Error(err_msg)
class ParalHints(collections.Iterable):
"""
Iterable with the hints for the parallel execution reported by ABINIT.
"""
Error = ParalHintsError
def __init__(self, info, confs):
self.info = info
self._confs = [ParalConf(**d) for d in confs]
@classmethod
def from_mpi_omp_lists(cls, mpi_procs, omp_threads):
"""
Build a list of Parallel configurations from two lists
containing the number of MPI processes and the number of OpenMP threads
i.e. product(mpi_procs, omp_threads).
        The configurations have parallel efficiency set to 1.0 and no input variables.
Mainly used for preparing benchmarks.
"""
info = {}
        confs = [ParalConf(mpi_ncpus=p, omp_ncpus=t, efficiency=1.0)
                 for p, t in product(mpi_procs, omp_threads)]
return cls(info, confs)
def __getitem__(self, key):
return self._confs[key]
def __iter__(self):
return self._confs.__iter__()
def __len__(self):
return self._confs.__len__()
def __repr__(self):
return "\n".join(str(conf) for conf in self)
def __str__(self):
return repr(self)
@lazy_property
def max_cores(self):
"""Maximum number of cores."""
return max(c.mpi_procs * c.omp_threads for c in self)
@lazy_property
def max_mem_per_proc(self):
"""Maximum memory per MPI process."""
return max(c.mem_per_proc for c in self)
@lazy_property
def max_speedup(self):
"""Maximum speedup."""
return max(c.speedup for c in self)
@lazy_property
def max_efficiency(self):
"""Maximum parallel efficiency."""
return max(c.efficiency for c in self)
@pmg_serialize
def as_dict(self, **kwargs):
return {"info": self.info, "confs": self._confs}
@classmethod
def from_dict(cls, d):
return cls(info=d["info"], confs=d["confs"])
def copy(self):
"""Shallow copy of self."""
return copy.copy(self)
def select_with_condition(self, condition, key=None):
"""
Remove all the configurations that do not satisfy the given condition.
Args:
condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax
key: Selects the sub-dictionary on which condition is applied, e.g. key="vars"
if we have to filter the configurations depending on the values in vars
"""
condition = Condition.as_condition(condition)
new_confs = []
for conf in self:
# Select the object on which condition is applied
obj = conf if key is None else AttrDict(conf[key])
add_it = condition(obj=obj)
#if key is "vars": print("conf", conf, "added:", add_it)
if add_it: new_confs.append(conf)
self._confs = new_confs
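    # A hedged example of the Mongodb-like syntax accepted by select_with_condition
    # ("$gte" is the operator used elsewhere in this module; npband is just an
    # illustrative ABINIT variable):
    #
    #   hints.select_with_condition({"efficiency": {"$gte": 0.8}})
    #   hints.select_with_condition({"npband": {"$gte": 4}}, key="vars")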
def sort_by_efficiency(self, reverse=True):
"""Sort the configurations in place. items with highest efficiency come first"""
self._confs.sort(key=lambda c: c.efficiency, reverse=reverse)
return self
def sort_by_speedup(self, reverse=True):
"""Sort the configurations in place. items with highest speedup come first"""
self._confs.sort(key=lambda c: c.speedup, reverse=reverse)
return self
def sort_by_mem_per_proc(self, reverse=False):
"""Sort the configurations in place. items with lowest memory per proc come first."""
# Avoid sorting if mem_per_cpu is not available.
if any(c.mem_per_proc > 0.0 for c in self):
self._confs.sort(key=lambda c: c.mem_per_proc, reverse=reverse)
return self
def multidimensional_optimization(self, priorities=("speedup", "efficiency")):
# Mapping property --> options passed to sparse_histogram
opts = dict(speedup=dict(step=1.0), efficiency=dict(step=0.1), mem_per_proc=dict(memory=1024))
#opts = dict(zip(priorities, bin_widths))
opt_confs = self._confs
for priority in priorities:
histogram = SparseHistogram(opt_confs, key=lambda c: getattr(c, priority), **opts[priority])
pos = 0 if priority == "mem_per_proc" else -1
opt_confs = histogram.values[pos]
#histogram.plot(show=True, savefig="hello.pdf")
return self.__class__(info=self.info, confs=opt_confs)
#def histogram_efficiency(self, step=0.1):
# """Returns a :class:`SparseHistogram` with configuration grouped by parallel efficiency."""
# return SparseHistogram(self._confs, key=lambda c: c.efficiency, step=step)
#def histogram_speedup(self, step=1.0):
# """Returns a :class:`SparseHistogram` with configuration grouped by parallel speedup."""
# return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)
#def histogram_memory(self, step=1024):
# """Returns a :class:`SparseHistogram` with configuration grouped by memory."""
# return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)
#def filter(self, qadapter):
# """Return a new list of configurations that can be executed on the `QueueAdapter` qadapter."""
# new_confs = [pconf for pconf in self if qadapter.can_run_pconf(pconf)]
# return self.__class__(info=self.info, confs=new_confs)
def get_ordered_with_policy(self, policy, max_ncpus):
"""
Sort and return a new list of configurations ordered according to the :class:`TaskPolicy` policy.
"""
# Build new list since we are gonna change the object in place.
hints = self.__class__(self.info, confs=[c for c in self if c.num_cores <= max_ncpus])
# First select the configurations satisfying the condition specified by the user (if any)
bkp_hints = hints.copy()
if policy.condition:
logger.info("Applying condition %s" % str(policy.condition))
hints.select_with_condition(policy.condition)
        # Undo change if no configuration fulfills the requirements.
if not hints:
hints = bkp_hints
logger.warning("Empty list of configurations after policy.condition")
# Now filter the configurations depending on the values in vars
bkp_hints = hints.copy()
if policy.vars_condition:
logger.info("Applying vars_condition %s" % str(policy.vars_condition))
hints.select_with_condition(policy.vars_condition, key="vars")
        # Undo change if no configuration fulfills the requirements.
if not hints:
hints = bkp_hints
logger.warning("Empty list of configurations after policy.vars_condition")
if len(policy.autoparal_priorities) == 1:
# Example: hints.sort_by_speedup()
if policy.autoparal_priorities[0] in ['efficiency', 'speedup', 'mem_per_proc']:
getattr(hints, "sort_by_" + policy.autoparal_priorities[0])()
elif isinstance(policy.autoparal_priorities[0], collections.Mapping):
if policy.autoparal_priorities[0]['meta_priority'] == 'highest_speedup_minimum_efficiency_cutoff':
min_efficiency = policy.autoparal_priorities[0].get('minimum_efficiency', 1.0)
hints.select_with_condition({'efficiency': {'$gte': min_efficiency}})
hints.sort_by_speedup()
else:
hints = hints.multidimensional_optimization(priorities=policy.autoparal_priorities)
if len(hints) == 0: raise ValueError("len(hints) == 0")
#TODO: make sure that num_cores == 1 is never selected when we have more than one configuration
#if len(hints) > 1:
# hints.select_with_condition(dict(num_cores={"$eq": 1)))
# Return final (orderded ) list of configurations (best first).
return hints
class TaskPolicy(object):
"""
This object stores the parameters used by the :class:`TaskManager` to
create the submission script and/or to modify the ABINIT variables
governing the parallel execution. A `TaskPolicy` object contains
a set of variables that specify the launcher, as well as the options
and the conditions used to select the optimal configuration for the parallel run
"""
@classmethod
def as_policy(cls, obj):
"""
Converts an object obj into a `:class:`TaskPolicy. Accepts:
* None
* TaskPolicy
* dict-like object
"""
if obj is None:
# Use default policy.
return TaskPolicy()
else:
if isinstance(obj, cls):
return obj
elif isinstance(obj, collections.Mapping):
return cls(**obj)
else:
raise TypeError("Don't know how to convert type %s to %s" % (type(obj), cls))
@classmethod
def autodoc(cls):
return """
autoparal: # (integer). 0 to disable the autoparal feature (DEFAULT: 1 i.e. autoparal is on)
condition: # condition used to filter the autoparal configurations (Mongodb-like syntax).
# DEFAULT: empty i.e. ignored.
vars_condition: # Condition used to filter the list of ABINIT variables reported by autoparal
# (Mongodb-like syntax). DEFAULT: empty i.e. ignored.
frozen_timeout: # A job is considered frozen and its status is set to ERROR if no change to
# the output file has been done for `frozen_timeout` seconds. Accepts int with seconds or
# string in slurm form i.e. days-hours:minutes:seconds. DEFAULT: 1 hour.
precedence: # Under development.
autoparal_priorities: # Under development.
"""
def __init__(self, **kwargs):
"""
See autodoc
"""
self.autoparal = kwargs.pop("autoparal", 1)
self.condition = Condition(kwargs.pop("condition", {}))
self.vars_condition = Condition(kwargs.pop("vars_condition", {}))
self.precedence = kwargs.pop("precedence", "autoparal_conf")
self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup"])
#self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup", "efficiecy", "memory"]
# TODO frozen_timeout could be computed as a fraction of the timelimit of the qadapter!
self.frozen_timeout = qu.slurm_parse_timestr(kwargs.pop("frozen_timeout", "0-1:00:00"))
if kwargs:
raise ValueError("Found invalid keywords in policy section:\n %s" % str(kwargs.keys()))
# Consistency check.
if self.precedence not in ("qadapter", "autoparal_conf"):
raise ValueError("Wrong value for policy.precedence, should be qadapter or autoparal_conf")
def __str__(self):
lines = []
app = lines.append
for k, v in self.__dict__.items():
if k.startswith("_"): continue
app("%s: %s" % (k, v))
return "\n".join(lines)
class ManagerIncreaseError(Exception):
"""
Exception raised by the manager if the increase request failed
"""
class FixQueueCriticalError(Exception):
"""
error raised when an error could not be fixed at the task level
"""
# Global variable used to store the task manager returned by `from_user_config`.
_USER_CONFIG_TASKMANAGER = None
class TaskManager(MSONable):
"""
A `TaskManager` is responsible for the generation of the job script and the submission
of the task, as well as for the specification of the parameters passed to the resource manager
(e.g. Slurm, PBS ...) and/or the run-time specification of the ABINIT variables governing the parallel execution.
A `TaskManager` delegates the generation of the submission script and the submission of the task to the :class:`QueueAdapter`.
A `TaskManager` has a :class:`TaskPolicy` that governs the specification of the parameters for the parallel executions.
Ideally, the TaskManager should be the **main entry point** used by the task to deal with job submission/optimization
"""
YAML_FILE = "manager.yml"
USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
ENTRIES = {"policy", "qadapters", "db_connector", "batch_adapter"}
@classmethod
def autodoc(cls):
from .db import DBConnector
s = """
# TaskManager configuration file (YAML Format)
policy:
# Dictionary with options used to control the execution of the tasks.
qadapters:
# List of qadapters objects (mandatory)
- # qadapter_1
- # qadapter_2
db_connector:
# Connection to MongoDB database (optional)
batch_adapter:
# Adapter used to submit flows with batch script. (optional)
##########################################
# Individual entries are documented below:
##########################################
"""
s += "policy: " + TaskPolicy.autodoc() + "\n"
s += "qadapter: " + QueueAdapter.autodoc() + "\n"
#s += "db_connector: " + DBConnector.autodoc()
return s
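    # A hedged, minimal manager.yml sketch (values are hypothetical; the authoritative
    # list of qadapter keys is given by QueueAdapter.autodoc):
    #
    #   policy:
    #       autoparal: 1
    #   qadapters:
    #       - priority: 1
    #         queue: {qtype: shell, qname: localhost}
    #         limits: {min_cores: 1, max_cores: 2}
    #         job: {mpi_runner: mpirun}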
@classmethod
def from_user_config(cls):
"""
Initialize the :class:`TaskManager` from the YAML file 'manager.yaml'.
Search first in the working directory and then in the abipy configuration directory.
Raises:
RuntimeError if file is not found.
"""
global _USER_CONFIG_TASKMANAGER
if _USER_CONFIG_TASKMANAGER is not None:
return _USER_CONFIG_TASKMANAGER
# Try in the current directory then in user configuration directory.
path = os.path.join(os.getcwd(), cls.YAML_FILE)
if not os.path.exists(path):
path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)
if not os.path.exists(path):
raise RuntimeError(colored(
"\nCannot locate %s neither in current directory nor in %s\n"
"\nCannot locate %s neither in current directory nor in %s\n"
"!!! PLEASE READ THIS: !!!\n"
"To use abipy to run jobs this file must be present\n"
"It provides a description of the cluster/computer you are running on\n"
"Examples are provided in abipy/data/managers." % (cls.YAML_FILE, path), color="red"))
_USER_CONFIG_TASKMANAGER = cls.from_file(path)
return _USER_CONFIG_TASKMANAGER
@classmethod
def from_file(cls, filename):
"""Read the configuration parameters from the Yaml file filename."""
try:
with open(filename, "r") as fh:
return cls.from_dict(yaml.load(fh))
except Exception as exc:
print("Error while reading TaskManager parameters from %s\n" % filename)
raise
@classmethod
def from_string(cls, s):
"""Create an instance from string s containing a YAML dictionary."""
return cls.from_dict(yaml.load(s))
@classmethod
def as_manager(cls, obj):
"""
Convert obj into TaskManager instance. Accepts string, filepath, dictionary, `TaskManager` object.
If obj is None, the manager is initialized from the user config file.
"""
if isinstance(obj, cls): return obj
if obj is None: return cls.from_user_config()
if is_string(obj):
if os.path.exists(obj):
return cls.from_file(obj)
else:
return cls.from_string(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s to TaskManager" % type(obj))
@classmethod
def from_dict(cls, d):
"""Create an instance from a dictionary."""
return cls(**{k: v for k, v in d.items() if k in cls.ENTRIES})
@pmg_serialize
def as_dict(self):
return self._kwargs
def __init__(self, **kwargs):
"""
Args:
            policy: :class:`TaskPolicy` object or dict (optional, the default policy is used if None)
            qadapters: List of qadapters in YAML format (mandatory)
            db_connector: Dictionary with data used to connect to the database (optional)
"""
# Keep a copy of kwargs
self._kwargs = copy.deepcopy(kwargs)
self.policy = TaskPolicy.as_policy(kwargs.pop("policy", None))
# Initialize database connector (if specified)
self.db_connector = DBConnector(**kwargs.pop("db_connector", {}))
# Build list of QAdapters. Neglect entry if priority == 0 or `enabled: no"
qads = []
for d in kwargs.pop("qadapters"):
if d.get("enabled", False): continue
qad = make_qadapter(**d)
if qad.priority > 0:
qads.append(qad)
elif qad.priority < 0:
raise ValueError("qadapter cannot have negative priority:\n %s" % qad)
if not qads:
raise ValueError("Received emtpy list of qadapters")
#if len(qads) != 1:
# raise NotImplementedError("For the time being multiple qadapters are not supported! Please use one adapter")
# Order qdapters according to priority.
qads = sorted(qads, key=lambda q: q.priority)
priorities = [q.priority for q in qads]
if len(priorities) != len(set(priorities)):
raise ValueError("Two or more qadapters have same priority. This is not allowed. Check taskmanager.yml")
self._qads, self._qadpos = tuple(qads), 0
# Initialize the qadapter for batch script submission.
d = kwargs.pop("batch_adapter", None)
self.batch_adapter = None
if d: self.batch_adapter = make_qadapter(**d)
#print("batch_adapter", self.batch_adapter)
if kwargs:
raise ValueError("Found invalid keywords in the taskmanager file:\n %s" % str(list(kwargs.keys())))
def to_shell_manager(self, mpi_procs=1):
"""
Returns a new `TaskManager` with the same parameters as self but replace the :class:`QueueAdapter`
with a :class:`ShellAdapter` with mpi_procs so that we can submit the job without passing through the queue.
"""
my_kwargs = copy.deepcopy(self._kwargs)
my_kwargs["policy"] = TaskPolicy(autoparal=0)
# On BlueGene we need at least two qadapters.
# One for running jobs on the computing nodes and another one
# for running small jobs on the fronted. These two qadapters
        # will have different environments and different executables.
# If None of the q-adapters has qtype==shell, we change qtype to shell
# and we return a new Manager for sequential jobs with the same parameters as self.
# If the list contains a qadapter with qtype == shell, we ignore the remaining qadapters
# when we build the new Manager.
has_shell_qad = False
for d in my_kwargs["qadapters"]:
if d["queue"]["qtype"] == "shell": has_shell_qad = True
if has_shell_qad:
my_kwargs["qadapters"] = [d for d in my_kwargs["qadapters"] if d["queue"]["qtype"] == "shell"]
for d in my_kwargs["qadapters"]:
d["queue"]["qtype"] = "shell"
d["limits"]["min_cores"] = mpi_procs
d["limits"]["max_cores"] = mpi_procs
# If shell_runner is specified, replace mpi_runner with shell_runner
# in the script used to run jobs on the frontend.
        # On some machines based on Slurm, indeed, mpirun/mpiexec is not available
# and jobs should be executed with `srun -n4 exec` when running on the computing nodes
# or with `exec` when running in sequential on the frontend.
if "job" in d and "shell_runner" in d["job"]:
shell_runner = d["job"]["shell_runner"]
#print("shell_runner:", shell_runner, type(shell_runner))
if not shell_runner or shell_runner == "None": shell_runner = ""
d["job"]["mpi_runner"] = shell_runner
#print("shell_runner:", shell_runner)
#print(my_kwargs)
new = self.__class__(**my_kwargs)
new.set_mpi_procs(mpi_procs)
return new
def new_with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Return a new `TaskManager` in which autoparal has been disabled.
The jobs will be executed with `mpi_procs` MPI processes and `omp_threads` OpenMP threads.
Useful for generating input files for benchmarks.
"""
new = self.deepcopy()
new.policy.autoparal = 0
new.set_mpi_procs(mpi_procs)
new.set_omp_threads(omp_threads)
return new
@property
def has_queue(self):
"""True if we are submitting jobs via a queue manager."""
return self.qadapter.QTYPE.lower() != "shell"
@property
def qads(self):
"""List of :class:`QueueAdapter` objects sorted according to priorities (highest comes first)"""
return self._qads
@property
def qadapter(self):
"""The qadapter used to submit jobs."""
return self._qads[self._qadpos]
def select_qadapter(self, pconfs):
"""
Given a list of parallel configurations, pconfs, this method select an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.
Args:
pconfs: :class:`ParalHints` object with the list of parallel configurations
Returns:
:class:`ParallelConf` object with the `optimal` configuration.
"""
# Order the list of configurations according to policy.
policy, max_ncpus = self.policy, self.max_cores
pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)
if policy.precedence == "qadapter":
# Try to run on the qadapter with the highest priority.
for qadpos, qad in enumerate(self.qads):
possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]
if qad.allocation == "nodes":
# Select the configuration divisible by nodes if possible.
for pconf in possible_pconfs:
if pconf.num_cores % qad.hw.cores_per_node == 0:
return self._use_qadpos_pconf(qadpos, pconf)
# Here we select the first one.
if possible_pconfs:
return self._use_qadpos_pconf(qadpos, possible_pconfs[0])
elif policy.precedence == "autoparal_conf":
# Try to run on the first pconf irrespectively of the priority of the qadapter.
for pconf in pconfs:
for qadpos, qad in enumerate(self.qads):
if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
continue # Ignore it. not very clean
if qad.can_run_pconf(pconf):
return self._use_qadpos_pconf(qadpos, pconf)
else:
raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)
# No qadapter could be found
raise RuntimeError("Cannot find qadapter for this run!")
def _use_qadpos_pconf(self, qadpos, pconf):
"""
This function is called when we have accepted the :class:`ParalConf` pconf.
Returns pconf
"""
self._qadpos = qadpos
# Change the number of MPI/OMP cores.
self.set_mpi_procs(pconf.mpi_procs)
if self.has_omp: self.set_omp_threads(pconf.omp_threads)
# Set memory per proc.
#FIXME: Fixer may have changed the memory per proc and should not be resetted by ParalConf
#self.set_mem_per_proc(pconf.mem_per_proc)
return pconf
def __str__(self):
"""String representation."""
lines = []
app = lines.append
#app("[Task policy]\n%s" % str(self.policy))
for i, qad in enumerate(self.qads):
app("[Qadapter %d]\n%s" % (i, str(qad)))
app("Qadapter selected: %d" % self._qadpos)
if self.has_db:
app("[MongoDB database]:")
app(str(self.db_connector))
return "\n".join(lines)
@property
def has_db(self):
"""True if we are using MongoDB database"""
return bool(self.db_connector)
@property
def has_omp(self):
"""True if we are using OpenMP parallelization."""
return self.qadapter.has_omp
@property
def num_cores(self):
"""Total number of CPUs used to run the task."""
return self.qadapter.num_cores
@property
def mpi_procs(self):
"""Number of MPI processes."""
return self.qadapter.mpi_procs
@property
def mem_per_proc(self):
"""Memory per MPI process."""
return self.qadapter.mem_per_proc
@property
def omp_threads(self):
"""Number of OpenMP threads"""
return self.qadapter.omp_threads
def deepcopy(self):
"""Deep copy of self."""
return copy.deepcopy(self)
def set_mpi_procs(self, mpi_procs):
"""Set the number of MPI processes to use."""
self.qadapter.set_mpi_procs(mpi_procs)
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMp threads to use."""
self.qadapter.set_omp_threads(omp_threads)
def set_mem_per_proc(self, mem_mb):
"""Set the memory (in Megabytes) per CPU."""
self.qadapter.set_mem_per_proc(mem_mb)
@property
def max_cores(self):
"""
Maximum number of cores that can be used.
This value is mainly used in the autoparal part to get the list of possible configurations.
"""
return max(q.hint_cores for q in self.qads)
def get_njobs_in_queue(self, username=None):
"""
returns the number of jobs in the queue,
returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.qadapter.get_njobs_in_queue(username=username)
def cancel(self, job_id):
"""Cancel the job. Returns exit status."""
return self.qadapter.cancel(job_id)
def write_jobfile(self, task, **kwargs):
"""
Write the submission script. Return the path of the script
================ ============================================
kwargs Meaning
================ ============================================
exec_args List of arguments passed to task.executable.
Default: no arguments.
================ ============================================
"""
script = self.qadapter.get_script_str(
job_name=task.name,
launch_dir=task.workdir,
executable=task.executable,
qout_path=task.qout_file.path,
qerr_path=task.qerr_file.path,
stdin=task.files_file.path,
stdout=task.log_file.path,
stderr=task.stderr_file.path,
exec_args=kwargs.pop("exec_args", []),
)
# Write the script.
with open(task.job_file.path, "w") as fh:
fh.write(script)
task.job_file.chmod(0o740)
return task.job_file.path
def launch(self, task, **kwargs):
"""
Build the input files and submit the task via the :class:`Qadapter`
Args:
task: :class:`TaskObject`
Returns:
Process object.
"""
if task.status == task.S_LOCKED:
raise ValueError("You shall not submit a locked task!")
# Build the task
task.build()
# Pass information on the time limit to Abinit (we always assume ndtset == 1)
#if False and isinstance(task, AbinitTask):
if isinstance(task, AbinitTask):
args = kwargs.get("exec_args", [])
if args is None: args = []
args = args[:]
args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
kwargs["exec_args"] = args
logger.info("Will pass timelimit option to abinit %s:" % args)
# Write the submission script
script_file = self.write_jobfile(task, **kwargs)
# Submit the task and save the queue id.
try:
qjob, process = self.qadapter.submit_to_queue(script_file)
task.set_status(task.S_SUB, msg='submitted to queue')
task.set_qjob(qjob)
return process
except self.qadapter.MaxNumLaunchesError as exc:
# TODO: Here we should try to switch to another qadapter
# 1) Find a new parallel configuration in those stored in task.pconfs
# 2) Change the input file.
# 3) Regenerate the submission script
# 4) Relaunch
task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
raise
def get_collection(self, **kwargs):
"""Return the MongoDB collection used to store the results."""
return self.db_connector.get_collection(**kwargs)
def increase_mem(self):
# OLD
# with GW calculations in mind with GW mem = 10,
# the response fuction is in memory and not distributed
# we need to increase memory if jobs fail ...
# return self.qadapter.more_mem_per_proc()
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
            # here we should try to switch to another qadapter
raise ManagerIncreaseError('manager failed to increase mem')
def increase_ncpus(self):
"""
increase the number of cpus, first ask the current quadapter, if that one raises a QadapterIncreaseError
switch to the next qadapter. If all fail raise an ManagerIncreaseError
"""
try:
self.qadapter.more_cores()
except QueueAdapterError:
            # here we should try to switch to another qadapter
raise ManagerIncreaseError('manager failed to increase ncpu')
def increase_resources(self):
try:
self.qadapter.more_cores()
return
except QueueAdapterError:
pass
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
            # here we should try to switch to another qadapter
raise ManagerIncreaseError('manager failed to increase resources')
def exclude_nodes(self, nodes):
try:
self.qadapter.exclude_nodes(nodes=nodes)
except QueueAdapterError:
            # here we should try to switch to another qadapter
raise ManagerIncreaseError('manager failed to exclude nodes')
def increase_time(self):
try:
self.qadapter.more_time()
except QueueAdapterError:
            # here we should try to switch to another qadapter
raise ManagerIncreaseError('manager failed to increase time')
class AbinitBuild(object):
"""
This object stores information on the options used to build Abinit
.. attribute:: info
String with build information as produced by `abinit -b`
.. attribute:: version
Abinit version number e.g 8.0.1 (string)
.. attribute:: has_netcdf
True if netcdf is enabled.
.. attribute:: has_etsfio
True if etsf-io is enabled.
.. attribute:: has_omp
True if OpenMP is enabled.
.. attribute:: has_mpi
True if MPI is enabled.
.. attribute:: has_mpiio
True if MPI-IO is supported.
"""
def __init__(self, workdir=None, manager=None):
        # Build a simple manager to run the job in a shell subprocess.
        manager = TaskManager.as_manager(manager).to_shell_manager(mpi_procs=1)
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
# Generate a shell script to execute `abinit -b`
stdout = os.path.join(workdir, "run.abo")
script = manager.qadapter.get_script_str(
job_name="abinit_b",
launch_dir=workdir,
executable="abinit",
qout_path=os.path.join(workdir, "queue.qout"),
qerr_path=os.path.join(workdir, "queue.qerr"),
#stdin=os.path.join(workdir, "run.files"),
stdout=stdout,
stderr=os.path.join(workdir, "run.err"),
exec_args=["-b"],
)
# Execute the script.
script_file = os.path.join(workdir, "job.sh")
with open(script_file, "wt") as fh:
fh.write(script)
qjob, process = manager.qadapter.submit_to_queue(script_file)
process.wait()
if process.returncode != 0:
logger.critical("Error while executing %s" % script_file)
with open(stdout, "r") as fh:
self.info = fh.read()
# info string has the following format.
"""
=== Build Information ===
Version : 8.0.1
Build target : x86_64_darwin15.0.0_gnu5.3
Build date : 20160122
=== Compiler Suite ===
C compiler : gnu
C++ compiler : gnuApple
Fortran compiler : gnu5.3
CFLAGS : -g -O2 -mtune=native -march=native
CXXFLAGS : -g -O2 -mtune=native -march=native
FCFLAGS : -g -ffree-line-length-none
FC_LDFLAGS :
=== Optimizations ===
Debug level : basic
Optimization level : standard
Architecture : unknown_unknown
=== Multicore ===
Parallel build : yes
Parallel I/O : yes
openMP support : no
GPU support : no
=== Connectors / Fallbacks ===
Connectors on : yes
Fallbacks on : yes
DFT flavor : libxc-fallback+atompaw-fallback+wannier90-fallback
FFT flavor : none
LINALG flavor : netlib
MATH flavor : none
TIMER flavor : abinit
TRIO flavor : netcdf+etsf_io-fallback
=== Experimental features ===
Bindings : @enable_bindings@
Exports : no
GW double-precision : yes
=== Bazaar branch information ===
Branch ID : gmatteo@gmac-20160112110440-lf6exhneqim9082h
Revision : 1226
Committed : 0
"""
self.has_netcdf = False
self.has_etsfio = False
self.has_omp = False
self.has_mpi, self.has_mpiio = False, False
def yesno2bool(line):
ans = line.split()[-1]
return dict(yes=True, no=False)[ans]
# Parse info.
for line in self.info.splitlines():
if "Version" in line: self.version = line.split()[-1]
if "TRIO flavor" in line:
self.has_netcdf = "netcdf" in line
self.has_etsfio = "etsf_io" in line
if "openMP support" in line: self.has_omp = yesno2bool(line)
if "Parallel build" in line: self.has_mpi = yesno2bool(line)
if "Parallel I/O" in line: self.has_mpiio = yesno2bool(line)
def __str__(self):
lines = []
app = lines.append
app("Abinit Build Information:")
app(" Abinit version: %s" % self.version)
app(" MPI: %s, MPI-IO: %s, OpenMP: %s" % (self.has_mpi, self.has_mpiio, self.has_omp))
app(" Netcdf: %s, ETSF-IO: %s" % (self.has_netcdf, self.has_etsfio))
return "\n".join(lines)
class FakeProcess(object):
"""
This object is attached to a :class:`Task` instance if the task has not been submitted
This trick allows us to simulate a process that is still running so that
we can safely poll task.process.
"""
def poll(self):
return None
def wait(self):
raise RuntimeError("Cannot wait a FakeProcess")
def communicate(self, input=None):
raise RuntimeError("Cannot communicate with a FakeProcess")
def kill(self):
raise RuntimeError("Cannot kill a FakeProcess")
@property
def returncode(self):
return None
class MyTimedelta(datetime.timedelta):
"""A customized version of timedelta whose __str__ method doesn't print microseconds."""
def __new__(cls, days, seconds, microseconds):
return datetime.timedelta.__new__(cls, days, seconds, microseconds)
def __str__(self):
"""Remove microseconds from timedelta default __str__"""
s = super(MyTimedelta, self).__str__()
microsec = s.find(".")
if microsec != -1: s = s[:microsec]
return s
@classmethod
def as_timedelta(cls, delta):
"""Convert delta into a MyTimedelta object."""
# Cannot monkey patch the __class__ and must pass through __new__ as the object is immutable.
if isinstance(delta, cls): return delta
return cls(delta.days, delta.seconds, delta.microseconds)
class TaskDateTimes(object):
"""
Small object containing useful :class:`datetime.datatime` objects associated to important events.
.. attributes:
init: initialization datetime
submission: submission datetime
start: Begin of execution.
end: End of execution.
"""
def __init__(self):
self.init = datetime.datetime.now()
self.submission, self.start, self.end = None, None, None
def __str__(self):
lines = []
app = lines.append
app("Initialization done on: %s" % self.init)
if self.submission is not None: app("Submitted on: %s" % self.submission)
if self.start is not None: app("Started on: %s" % self.start)
if self.end is not None: app("Completed on: %s" % self.end)
return "\n".join(lines)
def reset(self):
"""Reinitialize the counters."""
        self.init = datetime.datetime.now()
        self.submission, self.start, self.end = None, None, None
def get_runtime(self):
""":class:`timedelta` with the run-time, None if the Task is not running"""
if self.start is None: return None
if self.end is None:
delta = datetime.datetime.now() - self.start
else:
delta = self.end - self.start
return MyTimedelta.as_timedelta(delta)
def get_time_inqueue(self):
"""
:class:`timedelta` with the time spent in the Queue, None if the Task is not running
.. note:
This value is always greater than the real value computed by the resource manager
as we start to count only when check_status sets the `Task` status to S_RUN.
"""
if self.submission is None: return None
if self.start is None:
delta = datetime.datetime.now() - self.submission
else:
delta = self.start - self.submission
# This happens when we read the exact start datetime from the ABINIT log file.
if delta.total_seconds() < 0: delta = datetime.timedelta(seconds=0)
return MyTimedelta.as_timedelta(delta)
class TaskError(NodeError):
"""Base Exception for :class:`Task` methods"""
class TaskRestartError(TaskError):
"""Exception raised while trying to restart the :class:`Task`."""
class Task(six.with_metaclass(abc.ABCMeta, Node)):
"""A Task is a node that performs some kind of calculation."""
# Use class attributes for TaskErrors so that we don't have to import them.
Error = TaskError
RestartError = TaskRestartError
# List of `AbinitEvent` subclasses that are tested in the check_status method.
# Subclasses should provide their own list if they need to check the converge status.
CRITICAL_EVENTS = []
# Prefixes for Abinit (input, output, temporary) files.
Prefix = collections.namedtuple("Prefix", "idata odata tdata")
pj = os.path.join
prefix = Prefix(pj("indata", "in"), pj("outdata", "out"), pj("tmpdata", "tmp"))
del Prefix, pj
def __init__(self, input, workdir=None, manager=None, deps=None):
"""
Args:
input: :class:`AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
deps: Dictionary specifying the dependency of this node.
None means that this Task has no dependency.
"""
# Init the node
super(Task, self).__init__()
self._input = input
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
# Handle possible dependencies.
if deps:
self.add_deps(deps)
# Date-time associated to submission, start and end.
self.datetimes = TaskDateTimes()
# Count the number of restarts.
self.num_restarts = 0
self._qjob = None
self.queue_errors = []
self.abi_errors = []
        # Two flags that provide, dynamically, information on the scaling behaviour of a task.
        # If any fixing process detects non-scaling behaviour, the corresponding flag should be
        # switched off; the same applies when a task type is known not to scale.
self.mem_scales = True
self.load_scales = True
def __getstate__(self):
"""
Return state is pickled as the contents for the instance.
In this case we just remove the process since Subprocess objects cannot be pickled.
This is the reason why we have to store the returncode in self._returncode instead
of using self.process.returncode.
"""
return {k: v for k, v in self.__dict__.items() if k not in ["_process"]}
#@check_spectator
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Files required for the execution.
self.input_file = File(os.path.join(self.workdir, "run.abi"))
self.output_file = File(os.path.join(self.workdir, "run.abo"))
self.files_file = File(os.path.join(self.workdir, "run.files"))
self.job_file = File(os.path.join(self.workdir, "job.sh"))
self.log_file = File(os.path.join(self.workdir, "run.log"))
self.stderr_file = File(os.path.join(self.workdir, "run.err"))
self.start_lockfile = File(os.path.join(self.workdir, "__startlock__"))
# This file is produced by Abinit if nprocs > 1 and MPI_ABORT.
self.mpiabort_file = File(os.path.join(self.workdir, "__ABI_MPIABORTFILE__"))
# Directories with input|output|temporary data.
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
# stderr and output file of the queue manager. Note extensions.
self.qerr_file = File(os.path.join(self.workdir, "queue.qerr"))
self.qout_file = File(os.path.join(self.workdir, "queue.qout"))
def set_manager(self, manager):
"""Set the :class:`TaskManager` used to launch the Task."""
self.manager = manager.deepcopy()
@property
def work(self):
"""The :class:`Work` containing this `Task`."""
return self._work
def set_work(self, work):
"""Set the :class:`Work` associated to this `Task`."""
if not hasattr(self, "_work"):
self._work = work
else:
if self._work != work:
raise ValueError("self._work != work")
@property
def flow(self):
"""The :class:`Flow` containing this `Task`."""
return self.work.flow
@lazy_property
def pos(self):
"""The position of the task in the :class:`Flow`"""
for i, task in enumerate(self.work):
if self == task:
return self.work.pos, i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos[0]) + "_t" + str(self.pos[1])
@property
def num_launches(self):
"""
        Number of launches performed. This number includes both possible ABINIT restarts
        and possible launches done due to errors encountered with the resource manager
        or the hardware/software."""
return sum(q.num_launches for q in self.manager.qads)
@property
def input(self):
"""AbinitInput object."""
return self._input
def get_inpvar(self, varname, default=None):
"""Return the value of the ABINIT variable varname, None if not present."""
return self.input.get(varname, default)
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
Set the values of the ABINIT variables in the input file. Return dict with old values.
"""
kwargs.update(dict(*args))
old_values = {vname: self.input.get(vname) for vname in kwargs}
self.input.set_vars(**kwargs)
if kwargs or old_values:
self.history.info("Setting input variables: %s" % str(kwargs))
self.history.info("Old values: %s" % str(old_values))
return old_values
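    # Minimal usage sketch (variable names are hypothetical): set_vars returns the
    # previous values, so a caller could restore them later if they were defined.
    #
    #     old_vars = task.set_vars(ecut=10, nband=20)
    #     ...
    #     task.set_vars(**old_vars)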
@property
def initial_structure(self):
"""Initial structure of the task."""
return self.input.structure
def make_input(self, with_header=False):
"""Construct the input file of the calculation."""
s = str(self.input)
if with_header: s = str(self) + "\n" + s
return s
def ipath_from_ext(self, ext):
"""
Returns the path of the input file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.idata + "_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.odata + "_" + ext)
@abc.abstractproperty
def executable(self):
"""
Path to the executable associated to the task (internally stored in self._executable).
"""
def set_executable(self, executable):
"""Set the executable associate to this task."""
self._executable = executable
@property
def process(self):
try:
return self._process
except AttributeError:
# Attach a fake process so that we can poll it.
return FakeProcess()
@property
def is_completed(self):
"""True if the task has been executed."""
return self.status >= self.S_DONE
@property
def can_run(self):
"""The task can run if its status is < S_SUB and all the other dependencies (if any) are done!"""
all_ok = all(stat == self.S_OK for stat in self.deps_status)
return self.status < self.S_SUB and self.status != self.S_LOCKED and all_ok
#@check_spectator
def cancel(self):
"""Cancel the job. Returns 1 if job was cancelled."""
if self.queue_id is None: return 0
if self.status >= self.S_DONE: return 0
exit_status = self.manager.cancel(self.queue_id)
if exit_status != 0:
logger.warning("manager.cancel returned exit_status: %s" % exit_status)
return 0
# Remove output files and reset the status.
self.history.info("Job %s cancelled by user" % self.queue_id)
self.reset()
return 1
def with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Disable autoparal and force execution with `mpi_procs` MPI processes
and `omp_threads` OpenMP threads. Useful for generating benchmarks.
"""
manager = self.manager if hasattr(self, "manager") else self.flow.manager
self.manager = manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
#@check_spectator
def _on_done(self):
self.fix_ofiles()
#@check_spectator
def _on_ok(self):
# Fix output file names.
self.fix_ofiles()
# Get results
results = self.on_ok()
self.finalized = True
return results
#@check_spectator
def on_ok(self):
"""
This method is called once the `Task` has reached status S_OK.
Subclasses should provide their own implementation
Returns:
Dictionary that must contain at least the following entries:
returncode:
0 on success.
message:
a string that should provide a human-readable description of what has been performed.
"""
return dict(returncode=0, message="Calling on_all_ok of the base class!")
#@check_spectator
def fix_ofiles(self):
"""
This method is called when the task reaches S_OK.
It changes the extension of particular output files
produced by Abinit so that the 'official' extension
is preserved e.g. out_1WF14 --> out_1WF
"""
filepaths = self.outdir.list_filepaths()
logger.info("in fix_ofiles with filepaths %s" % list(filepaths))
old2new = FilepathFixer().fix_paths(filepaths)
for old, new in old2new.items():
self.history.info("will rename old %s to new %s" % (old, new))
os.rename(old, new)
#@check_spectator
def _restart(self, submit=True):
"""
Called by restart once we have finished preparing the task for restarting.
Return:
True if task has been restarted
"""
self.set_status(self.S_READY, msg="Restarted on %s" % time.asctime())
# Increase the counter.
self.num_restarts += 1
self.history.info("Restarted, num_restarts %d" % self.num_restarts)
# Reset datetimes
self.datetimes.reset()
if submit:
# Remove the lock file
self.start_lockfile.remove()
# Relaunch the task.
fired = self.start()
if not fired: self.history.warning("Restart failed")
else:
fired = False
return fired
#@check_spectator
def restart(self):
"""
Restart the calculation. Subclasses should provide a concrete version that
performs all the actions needed for preparing the restart and then calls self._restart
to restart the task. The default implementation is empty.
Returns:
1 if job was restarted, 0 otherwise.
"""
logger.debug("Calling the **empty** restart method of the base class")
return 0
def poll(self):
"""Check if child process has terminated. Set and return returncode attribute."""
self._returncode = self.process.poll()
if self._returncode is not None:
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
def wait(self):
"""Wait for child process to terminate. Set and return returncode attribute."""
self._returncode = self.process.wait()
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
def communicate(self, input=None):
"""
Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
Wait for process to terminate. The optional input argument should be a string to be sent to the
child process, or None, if no data should be sent to the child.
communicate() returns a tuple (stdoutdata, stderrdata).
"""
stdoutdata, stderrdata = self.process.communicate(input=input)
self._returncode = self.process.returncode
self.set_status(self.S_DONE, "status set to Done")
return stdoutdata, stderrdata
def kill(self):
"""Kill the child."""
self.process.kill()
self.set_status(self.S_ERROR, "status set to Error by task.kill")
self._returncode = self.process.returncode
@property
def returncode(self):
"""
The child return code, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
try:
return self._returncode
except AttributeError:
return 0
def reset(self):
"""
Reset the task status. Mainly used if we made a silly mistake in the initial
setup of the queue manager and we want to fix it and rerun the task.
Returns:
0 on success, 1 if reset failed.
"""
# Can only reset tasks that are done.
# One should be able to reset 'Submitted' tasks (sometimes, they are not in the queue
# and we want to restart them)
if self.status != self.S_SUB and self.status < self.S_DONE: return 1
# Remove output files otherwise the EventParser will think the job is still running
self.output_file.remove()
self.log_file.remove()
self.stderr_file.remove()
self.start_lockfile.remove()
self.qerr_file.remove()
self.qout_file.remove()
self.set_status(self.S_INIT, msg="Reset on %s" % time.asctime())
self.set_qjob(None)
return 0
@property
@return_none_if_raise(AttributeError)
def queue_id(self):
"""Queue identifier returned by the Queue manager. None if not set"""
return self.qjob.qid
@property
@return_none_if_raise(AttributeError)
def qname(self):
"""Queue name identifier returned by the Queue manager. None if not set"""
return self.qjob.qname
@property
def qjob(self):
return self._qjob
def set_qjob(self, qjob):
"""Set info on queue after submission."""
self._qjob = qjob
@property
def has_queue(self):
"""True if we are submitting jobs via a queue manager."""
return self.manager.qadapter.QTYPE.lower() != "shell"
@property
def num_cores(self):
"""Total number of CPUs used to run the task."""
return self.manager.num_cores
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self.manager.mpi_procs
@property
def omp_threads(self):
"""Number of CPUs used for OpenMP."""
return self.manager.omp_threads
@property
def mem_per_proc(self):
"""Memory per MPI process."""
return Memory(self.manager.mem_per_proc, "Mb")
@property
def status(self):
"""Gives the status of the task."""
return self._status
def lock(self, source_node):
"""Lock the task, source is the :class:`Node` that applies the lock."""
if self.status != self.S_INIT:
raise ValueError("Trying to lock a task with status %s" % self.status)
self._status = self.S_LOCKED
self.history.info("Locked by node %s", source_node)
def unlock(self, source_node, check_status=True):
"""
Unlock the task, set its status to `S_READY` so that the scheduler can submit it.
source_node is the :class:`Node` that removed the lock
Call task.check_status if check_status is True.
"""
if self.status != self.S_LOCKED:
raise RuntimeError("Trying to unlock a task with status %s" % self.status)
self._status = self.S_READY
if check_status: self.check_status()
self.history.info("Unlocked by %s", source_node)
#@check_spectator
def set_status(self, status, msg):
"""
Set and return the status of the task.
Args:
status: Status object or string representation of the status
msg: string with human-readable message used in the case of errors.
"""
# truncate string if it's long. msg will be logged in the object and we don't want to waste memory.
if len(msg) > 2000:
msg = msg[:2000]
msg += "\n... snip ...\n"
# Locked files must be explicitly unlocked
if self.status == self.S_LOCKED or status == self.S_LOCKED:
err_msg = (
"Locked files must be explicitly unlocked before calling set_status but\n"
"task.status = %s, input status = %s" % (self.status, status))
raise RuntimeError(err_msg)
status = Status.as_status(status)
changed = True
if hasattr(self, "_status"):
changed = (status != self._status)
self._status = status
if status == self.S_RUN:
# Set datetimes.start when the task enters S_RUN
if self.datetimes.start is None:
self.datetimes.start = datetime.datetime.now()
# Add new entry to history only if the status has changed.
if changed:
if status == self.S_SUB:
self.datetimes.submission = datetime.datetime.now()
self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))
elif status == self.S_OK:
self.history.info("Task completed %s", msg)
elif status == self.S_ABICRITICAL:
self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg)
else:
self.history.info("Status changed to %s. msg: %s", status, msg)
#######################################################
        # The section below contains callbacks that should not
# be executed if we are in spectator_mode
#######################################################
if status == self.S_DONE:
# Execute the callback
self._on_done()
if status == self.S_OK:
# Finalize the task.
if not self.finalized:
self._on_ok()
# here we remove the output files of the task and of its parents.
if self.gc is not None and self.gc.policy == "task":
self.clean_output_files()
self.send_signal(self.S_OK)
return status
def check_status(self):
"""
This function checks the status of the task by inspecting the output and the
error files produced by the application and by the queue manager.
"""
        # 1) see if the job is blocked
        # 2) see if an error occurred while submitting the job. TODO: these problems can be solved
        # 3) see if there is output
        # 4) see if abinit reports problems
        # 5) see if both err files exist and are empty
        # 6) no output and no err files, the job must still be running
        # 7) try to find out what caused the problems
        # 8) there is a problem but we did not figure out what ...
        # 9) the only way of landing here is if there is an output file but no err files...
        # 1) A locked task can only be unlocked by calling set_status explicitly.
        # An errored task should not end up here, but just to be sure:
black_list = (self.S_LOCKED, self.S_ERROR)
#if self.status in black_list: return self.status
# 2) Check the returncode of the process (the process of submitting the job) first.
        # At this point, this type of problem should also be handled by the scheduler error parser.
if self.returncode != 0:
# The job was not submitted properly
return self.set_status(self.S_QCRITICAL, msg="return code %s" % self.returncode)
# If we have an abort file produced by Abinit
if self.mpiabort_file.exists:
return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file")
# Analyze the stderr file for Fortran runtime errors.
# getsize is 0 if the file is empty or it does not exist.
err_msg = None
if self.stderr_file.getsize() != 0:
#if self.stderr_file.exists:
err_msg = self.stderr_file.read()
# Analyze the stderr file of the resource manager runtime errors.
# TODO: Why are we looking for errors in queue.qerr?
qerr_info = None
if self.qerr_file.getsize() != 0:
#if self.qerr_file.exists:
qerr_info = self.qerr_file.read()
# Analyze the stdout file of the resource manager (needed for PBS !)
qout_info = None
if self.qout_file.getsize():
#if self.qout_file.exists:
qout_info = self.qout_file.read()
# Start to check ABINIT status if the output file has been created.
#if self.output_file.getsize() != 0:
if self.output_file.exists:
try:
report = self.get_event_report()
except Exception as exc:
msg = "%s exception while parsing event_report:\n%s" % (self, exc)
return self.set_status(self.S_ABICRITICAL, msg=msg)
if report is None:
return self.set_status(self.S_ERROR, msg="got None report!")
if report.run_completed:
# Here we set the correct timing data reported by Abinit
self.datetimes.start = report.start_datetime
self.datetimes.end = report.end_datetime
# Check if the calculation converged.
not_ok = report.filter_types(self.CRITICAL_EVENTS)
if not_ok:
return self.set_status(self.S_UNCONVERGED, msg='status set to unconverged based on abiout')
else:
return self.set_status(self.S_OK, msg="status set to ok based on abiout")
# Calculation still running or errors?
if report.errors:
# Abinit reported problems
logger.debug('Found errors in report')
for error in report.errors:
logger.debug(str(error))
try:
self.abi_errors.append(error)
except AttributeError:
self.abi_errors = [error]
# The job is unfixable due to ABINIT errors
logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self)
msg = "\n".join(map(repr, report.errors))
return self.set_status(self.S_ABICRITICAL, msg=msg)
# 5)
if self.stderr_file.exists and not err_msg:
if self.qerr_file.exists and not qerr_info:
# there is output and no errors
# The job still seems to be running
return self.set_status(self.S_RUN, msg='there is output and no errors: job still seems to be running')
# 6)
if not self.output_file.exists:
logger.debug("output_file does not exists")
if not self.stderr_file.exists and not self.qerr_file.exists:
                # No output at all. The job is still in the queue.
return self.status
# 7) Analyze the files of the resource manager and abinit and execution err (mvs)
if qerr_info or qout_info:
from pymatgen.io.abinit.scheduler_error_parsers import get_parser
scheduler_parser = get_parser(self.manager.qadapter.QTYPE, err_file=self.qerr_file.path,
out_file=self.qout_file.path, run_err_file=self.stderr_file.path)
if scheduler_parser is None:
return self.set_status(self.S_QCRITICAL,
msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE)
scheduler_parser.parse()
if scheduler_parser.errors:
                # Store the queue errors in the task.
                self.queue_errors = scheduler_parser.errors
msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors)
# self.history.critical(msg)
return self.set_status(self.S_QCRITICAL, msg=msg)
# The job is killed or crashed and we know what happened
elif lennone(qerr_info) > 0:
# if only qout_info, we are not necessarily in QCRITICAL state,
# since there will always be info in the qout file
msg = 'found unknown messages in the queue error: %s' % str(qerr_info)
                self.history.info(msg)
print(msg)
# self.num_waiting += 1
# if self.num_waiting > 1000:
rt = self.datetimes.get_runtime().seconds
tl = self.manager.qadapter.timelimit
if rt > tl:
                    msg += 'set to error: runtime (%s) exceeded walltime (%s)' % (rt, tl)
print(msg)
return self.set_status(self.S_ERROR, msg=msg)
                # The job may have been killed or may have crashed, but we don't know what happened.
                # It may also be that an innocent message was written to qerr, so we wait for a while:
                # it is set to QCritical and we will attempt to fix it by running on more resources.
        # 8) analyzing the err files and abinit output did not identify a problem
# but if the files are not empty we do have a problem but no way of solving it:
if lennone(err_msg) > 0:
msg = 'found error message:\n %s' % str(err_msg)
return self.set_status(self.S_QCRITICAL, msg=msg)
            # The job is killed or crashed but we don't know what happened
# it is set to QCritical, we will attempt to fix it by running on more resources
# 9) if we still haven't returned there is no indication of any error and the job can only still be running
# but we should actually never land here, or we have delays in the file system ....
# print('the job still seems to be running maybe it is hanging without producing output... ')
# Check time of last modification.
if self.output_file.exists and \
(time.time() - self.output_file.get_stat().st_mtime > self.manager.policy.frozen_timeout):
msg = "Task seems to be frozen, last change more than %s [s] ago" % self.manager.policy.frozen_timeout
return self.set_status(self.S_ERROR, msg=msg)
# Handle weird case in which either run.abo, or run.log have not been produced
#if self.status not in (self.S_INIT, self.S_READY) and (not self.output.file.exists or not self.log_file.exits):
# msg = "Task have been submitted but cannot find the log file or the output file"
# return self.set_status(self.S_ERROR, msg)
return self.set_status(self.S_RUN, msg='final option: nothing seems to be wrong, the job must still be running')
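    # Hedged sketch of a polling loop built on check_status (the real scheduler adds
    # error fixing, callbacks and timeouts on top of this):
    #
    #     import time
    #     while True:
    #         status = task.check_status()
    #         if status in (task.S_OK, task.S_ERROR, task.S_UNCONVERGED):
    #             break
    #         time.sleep(30)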
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
Returns True in case of success, False in case of Failure.
Should be overwritten by specific tasks.
"""
return False
def speed_up(self):
"""
Method that can be called by the flow to decrease the time needed for a specific task.
Returns True in case of success, False in case of Failure
Should be overwritten by specific tasks.
"""
return False
def out_to_in(self, out_file):
"""
Move an output file to the output data directory of the `Task`
and rename the file so that ABINIT will read it as an input data file.
Returns:
The absolute path of the new file in the indata directory.
"""
in_file = os.path.basename(out_file).replace("out", "in", 1)
dest = os.path.join(self.indir.path, in_file)
if os.path.exists(dest) and not os.path.islink(dest):
logger.warning("Will overwrite %s with %s" % (dest, out_file))
os.rename(out_file, dest)
return dest
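    # Illustrative example (typical ABINIT file names, not verified against a real run):
    # reuse the density produced by this task as input for a restart.
    #
    #     out_den = task.outdir.has_abiext("DEN")        # e.g. .../outdata/out_DEN
    #     in_den = task.out_to_in(out_den)               # moved to .../indata/in_DEN
    #     task.set_vars(irdvars_for_ext("DEN"))          # tell ABINIT to read it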
def inlink_file(self, filepath):
"""
Create a symbolic link to the specified file in the
directory containing the input files of the task.
"""
if not os.path.exists(filepath):
logger.debug("Creating symbolic link to not existent file %s" % filepath)
# Extract the Abinit extension and add the prefix for input files.
root, abiext = abi_splitext(filepath)
infile = "in_" + abiext
infile = self.indir.path_in(infile)
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
self.history.info("Linking path %s --> %s" % (filepath, infile))
if not os.path.exists(infile):
os.symlink(filepath, infile)
else:
if os.path.realpath(infile) != filepath:
raise self.Error("infile %s does not point to filepath %s" % (infile, filepath))
def make_links(self):
"""
Create symbolic links to the output files produced by the other tasks.
.. warning::
This method should be called only when the calculation is READY because
it uses a heuristic approach to find the file to link.
"""
for dep in self.deps:
filepaths, exts = dep.get_filepaths_and_exts()
for path, ext in zip(filepaths, exts):
logger.info("Need path %s with ext %s" % (path, ext))
dest = self.ipath_from_ext(ext)
if not os.path.exists(path):
# Try netcdf file. TODO: this case should be treated in a cleaner way.
path += ".nc"
if os.path.exists(path): dest += ".nc"
if not os.path.exists(path):
raise self.Error("%s: %s is needed by this task but it does not exist" % (self, path))
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
logger.debug("Linking path %s --> %s" % (path, dest))
if not os.path.exists(dest):
os.symlink(path, dest)
else:
# check links but only if we haven't performed the restart.
# in this case, indeed we may have replaced the file pointer with the
# previous output file of the present task.
if os.path.realpath(dest) != path and self.num_restarts == 0:
raise self.Error("dest %s does not point to path %s" % (dest, path))
@abc.abstractmethod
def setup(self):
"""Public method called before submitting the task."""
def _setup(self):
"""
This method calls self.setup after having performed additional operations
such as the creation of the symbolic links needed to connect different tasks.
"""
self.make_links()
self.setup()
def get_event_report(self, source="log"):
"""
Analyzes the main logfile of the calculation for possible Errors or Warnings.
If the ABINIT abort file is found, the error found in this file are added to
the output report.
Args:
source: "output" for the main output file,"log" for the log file.
Returns:
            :class:`EventReport` instance or None if the source file does not exist.
"""
# By default, we inspect the main log file.
ofile = {
"output": self.output_file,
"log": self.log_file}[source]
parser = events.EventsParser()
if not ofile.exists:
if not self.mpiabort_file.exists:
return None
else:
# ABINIT abort file without log!
abort_report = parser.parse(self.mpiabort_file.path)
return abort_report
try:
report = parser.parse(ofile.path)
#self._prev_reports[source] = report
# Add events found in the ABI_MPIABORTFILE.
if self.mpiabort_file.exists:
logger.critical("Found ABI_MPIABORTFILE!!!!!")
abort_report = parser.parse(self.mpiabort_file.path)
if len(abort_report) != 1:
logger.critical("Found more than one event in ABI_MPIABORTFILE")
# Weird case: empty abort file, let's skip the part
# below and hope that the log file contains the error message.
#if not len(abort_report): return report
# Add it to the initial report only if it differs
# from the last one found in the main log file.
last_abort_event = abort_report[-1]
if report and last_abort_event != report[-1]:
report.append(last_abort_event)
else:
report.append(last_abort_event)
return report
#except parser.Error as exc:
except Exception as exc:
# Return a report with an error entry with info on the exception.
msg = "%s: Exception while parsing ABINIT events:\n %s" % (ofile, str(exc))
self.set_status(self.S_ABICRITICAL, msg=msg)
return parser.report_exception(ofile.path, exc)
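    # Sketch of how the report is typically consumed (run_completed and errors are
    # the same attributes inspected in check_status above):
    #
    #     report = task.get_event_report(source="log")
    #     if report is not None and not report.run_completed and report.errors:
    #         for error in report.errors:
    #             print(error)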
def get_results(self, **kwargs):
"""
Returns :class:`NodeResults` instance.
Subclasses should extend this method (if needed) by adding
specialized code that performs some kind of post-processing.
"""
# Check whether the process completed.
if self.returncode is None:
raise self.Error("return code is None, you should call wait, communitate or poll")
if self.status is None or self.status < self.S_DONE:
raise self.Error("Task is not completed")
return self.Results.from_node(self)
def move(self, dest, is_abspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
        By default, dest is located in the parent directory of self.workdir.
Use is_abspath=True to specify an absolute path.
"""
if not is_abspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def in_files(self):
"""Return all the input data files used."""
return self.indir.list_filepaths()
def out_files(self):
"""Return all the output data files produced."""
return self.outdir.list_filepaths()
def tmp_files(self):
"""Return all the input data files produced."""
return self.tmpdir.list_filepaths()
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the top-level working directory."""
return os.path.join(self.workdir, filename)
def rename(self, src_basename, dest_basename, datadir="outdir"):
"""
Rename a file located in datadir.
src_basename and dest_basename are the basename of the source file
and of the destination file, respectively.
"""
directory = {
"indir": self.indir,
"outdir": self.outdir,
"tmpdir": self.tmpdir,
}[datadir]
src = directory.path_in(src_basename)
dest = directory.path_in(dest_basename)
os.rename(src, dest)
#@check_spectator
def build(self, *args, **kwargs):
"""
Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist.
"""
# Create dirs for input, output and tmp data.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Write files file and input file.
if not self.files_file.exists:
self.files_file.write(self.filesfile_string)
self.input_file.write(self.make_input())
self.manager.write_jobfile(self)
#@check_spectator
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by |.
Files matching one of the regular expressions will be preserved.
example: exclude_wildcard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
filepath = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(filepath)
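    # Example call (wildcard syntax as documented above): wipe the working directory
    # but keep netcdf and text files.
    #
    #     task.rmtree(exclude_wildcard="*.nc|*.txt")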
def remove_files(self, *filenames):
"""Remove all the files listed in filenames."""
filenames = list_strings(filenames)
for dirpath, dirnames, fnames in os.walk(self.workdir):
for fname in fnames:
if fname in filenames:
filepath = os.path.join(dirpath, fname)
os.remove(filepath)
def clean_output_files(self, follow_parents=True):
"""
This method is called when the task reaches S_OK. It removes all the output files
produced by the task that are not needed by its children as well as the output files
produced by its parents if no other node needs them.
Args:
follow_parents: If true, the output files of the parents nodes will be removed if possible.
Return:
list with the absolute paths of the files that have been removed.
"""
paths = []
if self.status != self.S_OK:
logger.warning("Calling task.clean_output_files on a task whose status != S_OK")
# Remove all files in tmpdir.
self.tmpdir.clean()
# Find the file extensions that should be preserved since these files are still
# needed by the children who haven't reached S_OK
except_exts = set()
for child in self.get_children():
if child.status == self.S_OK: continue
# Find the position of self in child.deps and add the extensions.
i = [dep.node for dep in child.deps].index(self)
except_exts.update(child.deps[i].exts)
# Remove the files in the outdir of the task but keep except_exts.
exts = self.gc.exts.difference(except_exts)
#print("Will remove its extensions: ", exts)
paths += self.outdir.remove_exts(exts)
if not follow_parents: return paths
# Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
for parent in self.get_parents():
# Here we build a dictionary file extension --> list of child nodes requiring this file from parent
# e.g {"WFK": [node1, node2]}
ext2nodes = collections.defaultdict(list)
for child in parent.get_children():
if child.status == child.S_OK: continue
i = [d.node for d in child.deps].index(parent)
for ext in child.deps[i].exts:
ext2nodes[ext].append(child)
# Remove extension only if no node depends on it!
except_exts = [k for k, lst in ext2nodes.items() if lst]
exts = self.gc.exts.difference(except_exts)
#print("%s removes extensions %s from parent node %s" % (self, exts, parent))
paths += parent.outdir.remove_exts(exts)
self.history.info("Removed files: %s" % paths)
return paths
def setup(self):
"""Base class does not provide any hook."""
#@check_spectator
def start(self, **kwargs):
"""
Starts the calculation by performing the following steps:
- build dirs and files
- call the _setup method
- execute the job file by executing/submitting the job script.
Main entry point for the `Launcher`.
============== ==============================================================
kwargs Meaning
============== ==============================================================
autoparal False to skip the autoparal step (default True)
exec_args List of arguments passed to executable.
============== ==============================================================
Returns:
1 if task was started, 0 otherwise.
"""
if self.status >= self.S_SUB:
raise self.Error("Task status: %s" % str(self.status))
if self.start_lockfile.exists:
self.history.warning("Found lock file: %s" % self.start_lockfile.path)
return 0
self.start_lockfile.write("Started on %s" % time.asctime())
self.build()
self._setup()
# Add the variables needed to connect the node.
for d in self.deps:
cvars = d.connecting_vars()
self.history.info("Adding connecting vars %s" % cvars)
self.set_vars(cvars)
# Get (python) data from other nodes
d.apply_getters(self)
# Automatic parallelization
if kwargs.pop("autoparal", True) and hasattr(self, "autoparal_run"):
try:
self.autoparal_run()
except QueueAdapterError as exc:
# If autoparal cannot find a qadapter to run the calculation raises an Exception
self.history.critical(exc)
msg = "Error trying to find a running configuration:\n%s" % straceback()
self.set_status(self.S_QCRITICAL, msg=msg)
return 0
except Exception as exc:
# Sometimes autoparal_run fails because Abinit aborts
# at the level of the parser e.g. cannot find the spacegroup
# due to some numerical noise in the structure.
# In this case we call fix_abicritical and then we try to run autoparal again.
self.history.critical("First call to autoparal failed with `%s`. Will try fix_abicritical" % exc)
msg = "autoparal_fake_run raised:\n%s" % straceback()
logger.critical(msg)
fixed = self.fix_abicritical()
if not fixed:
self.set_status(self.S_ABICRITICAL, msg="fix_abicritical could not solve the problem")
return 0
try:
self.autoparal_run()
self.history.info("Second call to autoparal succeeded!")
except Exception as exc:
self.history.critical("Second call to autoparal failed with %s. Cannot recover!", exc)
msg = "Tried autoparal again but got:\n%s" % straceback()
# logger.critical(msg)
self.set_status(self.S_ABICRITICAL, msg=msg)
return 0
# Start the calculation in a subprocess and return.
self._process = self.manager.launch(self, **kwargs)
return 1
def start_and_wait(self, *args, **kwargs):
"""
        Helper method to start the task and wait for completion.
Mainly used when we are submitting the task via the shell without passing through a queue manager.
"""
self.start(*args, **kwargs)
retcode = self.wait()
return retcode
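    # Hedged usage sketch: run a task synchronously, e.g. when the manager has been
    # converted to a shell manager and no queue system is involved.
    #
    #     retcode = task.start_and_wait(autoparal=False)
    #     if retcode != 0:
    #         print(task.status)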
class DecreaseDemandsError(Exception):
"""
    Exception raised by a task when a request to decrease some demand (load or memory) could not be performed.
"""
class AbinitTask(Task):
"""
Base class defining an ABINIT calculation
"""
Results = TaskResults
@classmethod
def from_input(cls, input, workdir=None, manager=None):
"""
Create an instance of `AbinitTask` from an ABINIT input.
Args:
            input: :class:`AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
return cls(input, workdir=workdir, manager=manager)
@classmethod
def temp_shell_task(cls, inp, workdir=None, manager=None):
"""
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=1))
task.set_name('temp_shell_task')
return task
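    # Minimal sketch (assuming `inp` is an AbinitInput): build a throwaway task,
    # run it via the shell and read back the events it produced.
    #
    #     task = AbinitTask.temp_shell_task(inp)
    #     task.start_and_wait(autoparal=False)
    #     report = task.get_event_report()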
def setup(self):
"""
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
        to the output file, and this breaks a lot of code that relies on the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
"""
def rename_file(afile):
"""Helper function to rename :class:`File` objects. Return string for logging purpose."""
# Find the index of the last file (if any).
# TODO: Maybe it's better to use run.abo --> run(1).abo
fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
last = max(nums) if nums else 0
new_path = afile.path + "_" + str(last+1)
os.rename(afile.path, new_path)
return "Will rename %s to %s" % (afile.path, new_path)
logs = []
if self.output_file.exists: logs.append(rename_file(self.output_file))
if self.log_file.exists: logs.append(rename_file(self.log_file))
if logs:
self.history.info("\n".join(logs))
@property
def executable(self):
"""Path to the executable required for running the Task."""
try:
return self._executable
except AttributeError:
return "abinit"
@property
def pseudos(self):
"""List of pseudos used in the calculation."""
return self.input.pseudos
@property
def isnc(self):
"""True if norm-conserving calculation."""
return self.input.isnc
@property
def ispaw(self):
"""True if PAW calculation"""
return self.input.ispaw
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
pj = os.path.join
app(self.input_file.path) # Path to the input file
app(self.output_file.path) # Path to the output file
app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
# Paths to the pseudopotential files.
# Note that here the pseudos **must** be sorted according to znucl.
# Here we reorder the pseudos if the order is wrong.
ord_pseudos = []
znucl = [specie.number for specie in
self.input.structure.types_of_specie]
for z in znucl:
for p in self.pseudos:
if p.Z == z:
ord_pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
for pseudo in ord_pseudos:
app(pseudo.path)
return "\n".join(lines)
def set_pconfs(self, pconfs):
"""Set the list of autoparal configurations."""
self._pconfs = pconfs
@property
def pconfs(self):
"""List of autoparal configurations."""
try:
return self._pconfs
except AttributeError:
return None
def uses_paral_kgb(self, value=1):
"""True if the task is a GS Task and uses paral_kgb with the given value."""
paral_kgb = self.get_inpvar("paral_kgb", 0)
# paral_kgb is used only in the GS part.
return paral_kgb == value and isinstance(self, GsTask)
def _change_structure(self, new_structure):
"""Change the input structure."""
# Compare new and old structure for logging purpose.
# TODO: Write method of structure to compare self and other and return a dictionary
old_structure = self.input.structure
old_lattice = old_structure.lattice
abc_diff = np.array(new_structure.lattice.abc) - np.array(old_lattice.abc)
angles_diff = np.array(new_structure.lattice.angles) - np.array(old_lattice.angles)
cart_diff = new_structure.cart_coords - old_structure.cart_coords
displs = np.array([np.sqrt(np.dot(v, v)) for v in cart_diff])
recs, tol_angle, tol_length = [], 10**-2, 10**-5
if np.any(np.abs(angles_diff) > tol_angle):
recs.append("new_agles - old_angles = %s" % angles_diff)
if np.any(np.abs(abc_diff) > tol_length):
recs.append("new_abc - old_abc = %s" % abc_diff)
if np.any(np.abs(displs) > tol_length):
min_pos, max_pos = displs.argmin(), displs.argmax()
recs.append("Mean displ: %.2E, Max_displ: %.2E (site %d), min_displ: %.2E (site %d)" %
(displs.mean(), displs[max_pos], max_pos, displs[min_pos], min_pos))
self.history.info("Changing structure (only significant diffs are shown):")
if not recs:
self.history.info("Input and output structure seems to be equal within the given tolerances")
else:
for rec in recs:
self.history.info(rec)
self.input.set_structure(new_structure)
#assert self.input.structure == new_structure
def autoparal_run(self):
"""
        Find an optimal set of parameters for the execution of the task.
        This method can change the ABINIT input variables and/or the
        submission parameters e.g. the number of CPUs for MPI and OpenMP.
        Set:
            self.pconfs where pconfs is a :class:`ParalHints` object with the configurations reported by
            autoparal and optconf is the optimal configuration selected.
        Returns 0 on success.
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(autoparal_vars.keys())
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
optconf = self.find_optconf(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
self.set_status(self.S_INIT, msg='finished autoparallel run')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
def find_optconf(self, pconfs):
"""Find the optimal Parallel configuration."""
# Save pconfs for future reference.
self.set_pconfs(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
return optconf
def select_files(self, what="o"):
"""
Helper function used to select the files of a task.
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
"""
choices = collections.OrderedDict([
("i", self.input_file),
("o", self.output_file),
("f", self.files_file),
("j", self.job_file),
("l", self.log_file),
("e", self.stderr_file),
("q", self.qout_file),
])
if what == "all":
return [getattr(v, "path") for v in choices.values()]
selected = []
for c in what:
try:
selected.append(getattr(choices[c], "path"))
except KeyError:
logger.warning("Wrong keyword %s" % c)
return selected
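    # Example (selector characters as documented above): paths of the main output
    # file and the log file.
    #
    #     paths = task.select_files("ol")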
def restart(self):
"""
        General restart used when scheduler problems have been taken care of.
"""
return self._restart()
#@check_spectator
def reset_from_scratch(self):
"""
        Restart from scratch. This is to be used if a job is restarted with more resources after a crash.
        Output files produced in workdir are moved to _reset, otherwise check_status continues
        to see the task as crashed even if the job did not run.
"""
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
#@check_spectator
def fix_abicritical(self):
"""
        Method to fix crashes/errors caused by ABINIT.
Returns:
1 if task has been fixed else 0.
"""
event_handlers = self.event_handlers
if not event_handlers:
self.set_status(status=self.S_ERROR, msg='Empty list of event handlers. Cannot fix abi_critical errors')
return 0
count, done = 0, len(event_handlers) * [0]
report = self.get_event_report()
if report is None:
self.set_status(status=self.S_ERROR, msg='get_event_report returned None')
return 0
# Note we have loop over all possible events (slow, I know)
# because we can have handlers for Error, Bug or Warning
# (ideally only for CriticalWarnings but this is not done yet)
for event in report:
for i, handler in enumerate(self.event_handlers):
if handler.can_handle(event) and not done[i]:
logger.info("handler %s will try to fix event %s" % (handler, event))
try:
d = handler.handle_task_event(self, event)
if d:
done[i] += 1
count += 1
except Exception as exc:
logger.critical(str(exc))
if count:
self.reset_from_scratch()
return 1
self.set_status(status=self.S_ERROR, msg='We encountered AbiCritical events that could not be fixed')
return 0
#@check_spectator
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
General strategy, first try to increase resources in order to fix the problem,
if this is not possible, call a task specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
self.history.info('fixing queue critical')
ret = "task.fix_queue_critical: "
if not self.queue_errors:
# TODO
            # paral_kgb = 1 leads to nasty sigsegv that are seen as Qcritical errors!
# Try to fallback to the conjugate gradient.
#if self.uses_paral_kgb(1):
# logger.critical("QCRITICAL with PARAL_KGB==1. Will try CG!")
# self.set_vars(paral_kgb=0)
# self.reset_from_scratch()
# return
            # Queue error but no errors detected: try to solve it by increasing ncpus if the task scales.
            # If resources are already at the maximum, the task is definitively marked as errored.
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
ret += "increased resources"
return ret
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
print("Fix_qcritical: received %d queue_errors" % len(self.queue_errors))
print("type_list: %s" % list(type(qe) for qe in self.queue_errors))
for error in self.queue_errors:
self.history.info('fixing: %s' % str(error))
ret += str(error)
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
                    # Ask the qadapter to provide more resources, i.e. more cpus so more total memory:
                    # if the code scales, this should fix the memory problem.
                    # Increase both max and min ncpus of the autoparal run and rerun autoparal.
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
return
except ManagerIncreaseError:
self.history.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
self.history.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
self.history.warning('decreasing demands failed')
                    msg = ('Memory error detected but the memory could not be increased, neither could the\n'
                           'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
print('trying to increase time')
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
                        self.history.warning('increasing the walltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
                            self.history.warning('increasing ncpus to speed up the calculation to stay within the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
self.history.warning('decreasing demands failed')
                    msg = ('Time cancel error detected but the time could not be increased, neither could\n'
                           'the time demand be decreased by speeding up the task or increasing the number of cpus.\n'
'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def parse_timing(self):
"""
Parse the timer data in the main output file of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Return: :class:`AbinitTimerParser` instance, None if error.
"""
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(self.output_file.path)
if read_ok:
return parser
return None
class ProduceHist(object):
"""
Mixin class for an :class:`AbinitTask` producing a HIST file.
Provide the method `open_hist` that reads and return a HIST file.
"""
@property
def hist_path(self):
"""Absolute path of the HIST file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._hist_path
except AttributeError:
path = self.outdir.has_abiext("HIST")
if path: self._hist_path = path
return path
def open_hist(self):
"""
        Open the HIST file located in self.outdir.
Returns :class:`HistFile` object, None if file could not be found or file is not readable.
"""
if not self.hist_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a HIST file in %s" % (self, self.outdir))
return None
# Open the HIST file
from abipy.dynamics.hist import HistFile
try:
return HistFile(self.hist_path)
except Exception as exc:
logger.critical("Exception while reading HIST file at %s:\n%s" % (self.hist_path, str(exc)))
return None
class GsTask(AbinitTask):
"""
Base class for ground-state tasks. A ground state task produces a GSR file
Provides the method `open_gsr` that reads and returns a GSR file.
"""
@property
def gsr_path(self):
"""Absolute path of the GSR file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._gsr_path
except AttributeError:
path = self.outdir.has_abiext("GSR")
if path: self._gsr_path = path
return path
def open_gsr(self):
"""
        Open the GSR file located in self.outdir.
Returns :class:`GsrFile` object, None if file could not be found or file is not readable.
"""
gsr_path = self.gsr_path
if not gsr_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
# Open the GSR file.
from abipy.electrons.gsr import GsrFile
try:
return GsrFile(gsr_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (gsr_path, str(exc)))
return None
class ScfTask(GsTask):
"""
Self-consistent ground-state calculations.
Provide support for in-place restart via (WFK|DEN) files
"""
CRITICAL_EVENTS = [
events.ScfConvergenceWarning,
]
color_rgb = np.array((255, 0, 0)) / 255
def restart(self):
"""SCF calculations can be restarted if we have either the WFK file or the DEN file."""
# Prefer WFK over DEN files since we can reuse the wavefunctions.
for ext in ("WFK", "DEN"):
restart_file = self.outdir.has_abiext(ext)
irdvars = irdvars_for_ext(ext)
if restart_file: break
else:
raise self.RestartError("%s: Cannot find WFK or DEN file to restart from." % self)
# Move out --> in.
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
def inspect(self, **kwargs):
"""
Plot the SCF cycle results with matplotlib.
        Returns:
            `matplotlib` figure, None if some error occurred.
"""
try:
scf_cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
except IOError:
return None
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
return None
def get_results(self, **kwargs):
results = super(ScfTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
class CollinearThenNonCollinearScfTask(ScfTask):
"""
    A specialized ScfTask that performs an initial SCF run with nsppol = 2.
The spin polarized WFK file is then used to start a non-collinear SCF run (nspinor == 2)
initialized from the previous WFK file.
"""
def __init__(self, input, workdir=None, manager=None, deps=None):
super(CollinearThenNonCollinearScfTask, self).__init__(input, workdir=workdir, manager=manager, deps=deps)
# Enforce nspinor = 1, nsppol = 2 and prtwf = 1.
self._input = self.input.deepcopy()
self.input.set_spin_mode("polarized")
self.input.set_vars(prtwf=1)
self.collinear_done = False
def _on_ok(self):
results = super(CollinearThenNonCollinearScfTask, self)._on_ok()
if not self.collinear_done:
self.input.set_spin_mode("spinor")
self.collinear_done = True
self.finalized = False
self.restart()
return results
class NscfTask(GsTask):
"""
Non-Self-consistent GS calculation. Provide in-place restart via WFK files
"""
CRITICAL_EVENTS = [
events.NscfConvergenceWarning,
]
color_rgb = np.array((255, 122, 122)) / 255
def restart(self):
"""NSCF calculations can be restarted only if we have the WFK file."""
ext = "WFK"
restart_file = self.outdir.has_abiext(ext)
if not restart_file:
raise self.RestartError("%s: Cannot find the WFK file to restart from." % self)
# Move out --> in.
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
irdvars = irdvars_for_ext(ext)
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
def get_results(self, **kwargs):
results = super(NscfTask, self).get_results(**kwargs)
# Read the GSR file.
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
class RelaxTask(GsTask, ProduceHist):
"""
Task for structural optimizations.
"""
# TODO possible ScfConvergenceWarning?
CRITICAL_EVENTS = [
events.RelaxConvergenceWarning,
]
color_rgb = np.array((255, 61, 255)) / 255
def get_final_structure(self):
"""Read the final structure from the GSR file."""
try:
with self.open_gsr() as gsr:
return gsr.structure
except AttributeError:
raise RuntimeError("Cannot find the GSR file with the final structure to restart from.")
def restart(self):
"""
Restart the structural relaxation.
        Structure relaxations can be restarted only if we have the WFK file or the DEN or the GSR file
        from which we can read the last structure (mandatory) and the wavefunctions (not mandatory but useful).
Prefer WFK over other files since we can reuse the wavefunctions.
.. note::
The problem in the present approach is that some parameters in the input
are computed from the initial structure and may not be consistent with
the modification of the structure done during the structure relaxation.
"""
restart_file = None
# Try to restart from the WFK file if possible.
# FIXME: This part has been disabled because WFK=IO is a mess if paral_kgb == 1
# This is also the reason why I wrote my own MPI-IO code for the GW part!
wfk_file = self.outdir.has_abiext("WFK")
if False and wfk_file:
irdvars = irdvars_for_ext("WFK")
restart_file = self.out_to_in(wfk_file)
# Fallback to DEN file. Note that here we look for out_DEN instead of out_TIM?_DEN
# This happens when the previous run completed and task.on_done has been performed.
# ********************************************************************************
        # Note that it's possible to have an undetected error if we have multiple restarts
# and the last relax died badly. In this case indeed out_DEN is the file produced
# by the last run that has executed on_done.
# ********************************************************************************
if restart_file is None:
out_den = self.outdir.path_in("out_DEN")
if os.path.exists(out_den):
irdvars = irdvars_for_ext("DEN")
restart_file = self.out_to_in(out_den)
if restart_file is None:
# Try to restart from the last TIM?_DEN file.
# This should happen if the previous run didn't complete in clean way.
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is not None:
ofile = self.outdir.path_in("out_DEN")
os.rename(last_timden.path, ofile)
restart_file = self.out_to_in(ofile)
irdvars = irdvars_for_ext("DEN")
if restart_file is None:
# Don't raise RestartError as we can still change the structure.
self.history.warning("Cannot find the WFK|DEN|TIM?_DEN file to restart from.")
else:
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
self.history.info("Will restart from %s", restart_file)
        # FIXME Here we should read the HIST file but restartxf is broken!
#self.set_vars({"restartxf": -1})
# Read the relaxed structure from the GSR file and change the input.
self._change_structure(self.get_final_structure())
# Now we can resubmit the job.
return self._restart()
def inspect(self, **kwargs):
"""
Plot the evolution of the structural relaxation with matplotlib.
Args:
what: Either "hist" or "scf". The first option (default) extracts data
from the HIST file and plot the evolution of the structural
parameters, forces, pressures and energies.
The second option, extracts data from the main output file and
plot the evolution of the SCF cycles (etotal, residuals, etc).
Returns:
`matplotlib` figure, None if some error occurred.
"""
what = kwargs.pop("what", "hist")
if what == "hist":
# Read the hist file to get access to the structure.
with self.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
elif what == "scf":
# Get info on the different SCF cycles
relaxation = abiinspect.Relaxation.from_file(self.output_file.path)
if "title" not in kwargs: kwargs["title"] = str(self)
return relaxation.plot(**kwargs) if relaxation is not None else None
else:
raise ValueError("Wrong value for what %s" % what)
def get_results(self, **kwargs):
results = super(RelaxTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
def reduce_dilatmx(self, target=1.01):
actual_dilatmx = self.get_inpvar('dilatmx', 1.)
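        # Illustrative worked example (comment only, not from the original code):
        # with actual_dilatmx = 1.1 and target = 1.01, the step below removes
        # min(1.1 - 1.01, 1.1 * 0.05) = 0.055, i.e. new_dilatmx = 1.045,
        # so dilatmx is reduced gradually towards the target value.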
new_dilatmx = actual_dilatmx - min((actual_dilatmx-target), actual_dilatmx*0.05)
self.set_vars(dilatmx=new_dilatmx)
def fix_ofiles(self):
"""
        Note that ABINIT produces lots of out_TIM?_DEN files (one for each relaxation step).
        Here we list all TIM?_DEN files, select the last one and rename it to out_DEN.
This change is needed so that we can specify dependencies with the syntax {node: "DEN"}
without having to know the number of iterations needed to converge the run in node!
"""
super(RelaxTask, self).fix_ofiles()
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is None:
logger.warning("Cannot find TIM?_DEN files")
return
# Rename last TIMDEN with out_DEN.
ofile = self.outdir.path_in("out_DEN")
self.history.info("Renaming last_denfile %s --> %s" % (last_timden.path, ofile))
os.rename(last_timden.path, ofile)
class DfptTask(AbinitTask):
"""
Base class for DFPT tasks (Phonons, ...)
Mainly used to implement methods that are common to DFPT calculations with Abinit.
    Provides the method `open_ddb` that reads and returns a DdbFile object.
.. warning::
This class should not be instantiated directly.
"""
@property
def ddb_path(self):
"""Absolute path of the DDB file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._ddb_path
except AttributeError:
path = self.outdir.has_abiext("DDB")
if path: self._ddb_path = path
return path
def open_ddb(self):
"""
        Open the DDB file located in self.outdir.
Returns :class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.ddb_path
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a DDB file in %s" % (self, self.outdir))
return None
# Open the DDB file.
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
# TODO Remove
class DdeTask(DfptTask):
"""Task for DDE calculations."""
def get_results(self, **kwargs):
results = super(DdeTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDB=(self.outdir.has_abiext("DDE"), "t"))
class DdkTask(DfptTask):
"""Task for DDK calculations."""
color_rgb = np.array((61, 158, 255)) / 255
#@check_spectator
def _on_ok(self):
super(DdkTask, self)._on_ok()
# Copy instead of removing, otherwise optic tests fail
# Fixing this problem requires a rationalization of file extensions.
#if self.outdir.rename_abiext('1WF', 'DDK') > 0:
#if self.outdir.copy_abiext('1WF', 'DDK') > 0:
self.outdir.symlink_abiext('1WF', 'DDK')
def get_results(self, **kwargs):
results = super(DdkTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDK=(self.outdir.has_abiext("DDK"), "t"))
class BecTask(DfptTask):
"""
    Task for the calculation of Born effective charges.
    The dependencies passed to this task are expected to be of the form::
        bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
        bec_deps.update({scf_task: "WFK"})
"""
color_rgb = np.array((122, 122, 255)) / 255
def make_links(self):
"""Replace the default behaviour of make_links"""
#print("In BEC make_links")
for dep in self.deps:
if dep.exts == ["DDK"]:
ddk_task = dep.node
out_ddk = ddk_task.outdir.has_abiext("DDK")
if not out_ddk:
raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
                # Get (fortran) idir and construct the name of the 1WF expected by Abinit
rfdir = list(ddk_task.input["rfdir"])
if rfdir.count(1) != 1:
raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
idir = rfdir.index(1) + 1
ddk_case = idir + 3 * len(ddk_task.input.structure)
infile = self.indir.path_in("in_1WF%d" % ddk_case)
os.symlink(out_ddk, infile)
elif dep.exts == ["WFK"]:
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("WFK")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
os.symlink(out_wfk, self.indir.path_in("in_WFK"))
else:
raise ValueError("Don't know how to handle extension: %s" % dep.exts)
class PhononTask(DfptTask):
"""
DFPT calculations for a single atomic perturbation.
Provide support for in-place restart via (1WF|1DEN) files
"""
# TODO:
# for the time being we don't discern between GS and PhononCalculations.
CRITICAL_EVENTS = [
events.ScfConvergenceWarning,
]
color_rgb = np.array((0, 0, 255)) / 255
def restart(self):
"""
        Phonon calculations can be restarted only if we have the 1WF file or the 1DEN file
        from which we can read the first-order wavefunctions or the first-order density.
Prefer 1WF over 1DEN since we can reuse the wavefunctions.
"""
# Abinit adds the idir-ipert index at the end of the file and this breaks the extension
        # e.g. out_1WF4, out_DEN4. find_1wf_files and find_1den_files return the list of files found.
restart_file, irdvars = None, None
# Highest priority to the 1WF file because restart is more efficient.
wf_files = self.outdir.find_1wf_files()
if wf_files is not None:
restart_file = wf_files[0].path
irdvars = irdvars_for_ext("1WF")
if len(wf_files) != 1:
restart_file = None
logger.critical("Found more than one 1WF file. Restart is ambiguous!")
if restart_file is None:
den_files = self.outdir.find_1den_files()
if den_files is not None:
restart_file = den_files[0].path
irdvars = {"ird1den": 1}
if len(den_files) != 1:
restart_file = None
logger.critical("Found more than one 1DEN file. Restart is ambiguous!")
if restart_file is None:
# Raise because otherwise restart is equivalent to a run from scratch --> infinite loop!
raise self.RestartError("%s: Cannot find the 1WF|1DEN file to restart from." % self)
# Move file.
self.history.info("Will restart from %s", restart_file)
restart_file = self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
return self._restart()
def inspect(self, **kwargs):
"""
Plot the Phonon SCF cycle results with matplotlib.
Returns:
`matplotlib` figure, None if some error occurred.
"""
scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path)
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
def get_results(self, **kwargs):
results = super(PhononTask, self).get_results(**kwargs)
return results.register_gridfs_files(DDB=(self.outdir.has_abiext("DDB"), "t"))
def make_links(self):
super(PhononTask, self).make_links()
# fix the problem that abinit uses the 1WF extension for the DDK output file but reads it with the irdddk flag
#if self.indir.has_abiext('DDK'):
# self.indir.rename_abiext('DDK', '1WF')
class EphTask(AbinitTask):
"""
Class for electron-phonon calculations.
"""
color_rgb = np.array((255, 128, 0)) / 255
class ManyBodyTask(AbinitTask):
"""
Base class for Many-body tasks (Screening, Sigma, Bethe-Salpeter)
Mainly used to implement methods that are common to MBPT calculations with Abinit.
.. warning::
This class should not be instantiated directly.
"""
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
Returns True in case of success, False in case of Failure.
"""
# The first digit governs the storage of W(q), the second digit the storage of u(r)
        # Try to avoid the storage of u(r) first since reading W(q) from file will lead to a dramatic slowdown.
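        # Illustrative sketch of the digit handling below (assuming the default
        # two-digit gwmem convention): gwmem=11 -> first call sets "10" (drop the
        # in-core storage of u(r)); the next call sets "00" (also read W(q) from
        # file); once gwmem is already 00 there is nothing left to release.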
prev_gwmem = int(self.get_inpvar("gwmem", default=11))
first_dig, second_dig = prev_gwmem // 10, prev_gwmem % 10
if second_dig == 1:
self.set_vars(gwmem="%.2d" % (10 * first_dig))
return True
if first_dig == 1:
self.set_vars(gwmem="%.2d" % 00)
return True
# gwmem 00 d'oh!
return False
class ScrTask(ManyBodyTask):
"""Tasks for SCREENING calculations """
color_rgb = np.array((255, 128, 0)) / 255
#def inspect(self, **kwargs):
# """Plot graph showing the number of q-points computed and the wall-time used"""
@property
def scr_path(self):
"""Absolute path of the SCR file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._scr_path
except AttributeError:
path = self.outdir.has_abiext("SCR.nc")
if path: self._scr_path = path
return path
def open_scr(self):
"""
        Open the SCR file located in self.outdir.
Returns :class:`ScrFile` object, None if file could not be found or file is not readable.
"""
scr_path = self.scr_path
if not scr_path:
logger.critical("%s didn't produce a SCR.nc file in %s" % (self, self.outdir))
return None
        # Open the SCR file and return the ScrFile object.
from abipy.electrons.scr import ScrFile
try:
return ScrFile(scr_path)
except Exception as exc:
logger.critical("Exception while reading SCR file at %s:\n%s" % (scr_path, str(exc)))
return None
class SigmaTask(ManyBodyTask):
"""
Tasks for SIGMA calculations. Provides support for in-place restart via QPS files
"""
CRITICAL_EVENTS = [
events.QPSConvergenceWarning,
]
color_rgb = np.array((0, 255, 0)) / 255
def restart(self):
# G calculations can be restarted only if we have the QPS file
# from which we can read the results of the previous step.
ext = "QPS"
restart_file = self.outdir.has_abiext(ext)
if not restart_file:
raise self.RestartError("%s: Cannot find the QPS file to restart from." % self)
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
irdvars = irdvars_for_ext(ext)
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
#def inspect(self, **kwargs):
# """Plot graph showing the number of k-points computed and the wall-time used"""
@property
def sigres_path(self):
"""Absolute path of the SIGRES file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._sigres_path
except AttributeError:
path = self.outdir.has_abiext("SIGRES")
if path: self._sigres_path = path
return path
def open_sigres(self):
"""
        Open the SIGRES file located in self.outdir.
Returns :class:`SigresFile` object, None if file could not be found or file is not readable.
"""
sigres_path = self.sigres_path
if not sigres_path:
logger.critical("%s didn't produce a SIGRES file in %s" % (self, self.outdir))
return None
# Open the SIGRES file and add its data to results.out
from abipy.electrons.gw import SigresFile
try:
return SigresFile(sigres_path)
except Exception as exc:
logger.critical("Exception while reading SIGRES file at %s:\n%s" % (sigres_path, str(exc)))
return None
def get_scissors_builder(self):
"""
Returns an instance of :class:`ScissorsBuilder` from the SIGRES file.
Raise:
`RuntimeError` if SIGRES file is not found.
"""
from abipy.electrons.scissors import ScissorsBuilder
if self.sigres_path:
return ScissorsBuilder.from_file(self.sigres_path)
else:
raise RuntimeError("Cannot find SIGRES file!")
def get_results(self, **kwargs):
results = super(SigmaTask, self).get_results(**kwargs)
# Open the SIGRES file and add its data to results.out
with self.open_sigres() as sigres:
#results["out"].update(sigres.as_dict())
results.register_gridfs_files(SIGRES=sigres.filepath)
return results
class BseTask(ManyBodyTask):
"""
Task for Bethe-Salpeter calculations.
.. note::
        The BSE code provides both iterative and direct schemes for the computation of the dielectric function.
The direct diagonalization cannot be restarted whereas Haydock and CG support restarting.
"""
CRITICAL_EVENTS = [
events.HaydockConvergenceWarning,
#events.BseIterativeDiagoConvergenceWarning,
]
color_rgb = np.array((128, 0, 255)) / 255
def restart(self):
"""
BSE calculations with Haydock can be restarted only if we have the
excitonic Hamiltonian and the HAYDR_SAVE file.
"""
# TODO: This version seems to work but the main output file is truncated
# TODO: Handle restart if CG method is used
# TODO: restart should receive a list of critical events
# the log file is complete though.
irdvars = {}
# Move the BSE blocks to indata.
# This is done only once at the end of the first run.
# Successive restarts will use the BSR|BSC files in the indir directory
# to initialize the excitonic Hamiltonian
count = 0
for ext in ("BSR", "BSC"):
ofile = self.outdir.has_abiext(ext)
if ofile:
count += 1
irdvars.update(irdvars_for_ext(ext))
self.out_to_in(ofile)
if not count:
# outdir does not contain the BSR|BSC file.
# This means that num_restart > 1 and the files should be in task.indir
count = 0
for ext in ("BSR", "BSC"):
ifile = self.indir.has_abiext(ext)
if ifile:
count += 1
if not count:
raise self.RestartError("%s: Cannot find BSR|BSC files in %s" % (self, self.indir))
# Rename HAYDR_SAVE files
count = 0
for ext in ("HAYDR_SAVE", "HAYDC_SAVE"):
ofile = self.outdir.has_abiext(ext)
if ofile:
count += 1
irdvars.update(irdvars_for_ext(ext))
self.out_to_in(ofile)
if not count:
raise self.RestartError("%s: Cannot find the HAYDR_SAVE file to restart from." % self)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
#self.history.info("Will restart from %s", restart_file)
return self._restart()
#def inspect(self, **kwargs):
# """
# Plot the Haydock iterations with matplotlib.
#
# Returns
# `matplotlib` figure, None if some error occurred.
# """
# haydock_cycle = abiinspect.HaydockIterations.from_file(self.output_file.path)
# if haydock_cycle is not None:
# if "title" not in kwargs: kwargs["title"] = str(self)
# return haydock_cycle.plot(**kwargs)
@property
def mdf_path(self):
"""Absolute path of the MDF file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._mdf_path
except AttributeError:
path = self.outdir.has_abiext("MDF.nc")
if path: self._mdf_path = path
return path
def open_mdf(self):
"""
        Open the MDF file located in self.outdir.
Returns :class:`MdfFile` object, None if file could not be found or file is not readable.
"""
mdf_path = self.mdf_path
if not mdf_path:
logger.critical("%s didn't produce a MDF file in %s" % (self, self.outdir))
return None
        # Open the MDF file and return the MdfFile object.
from abipy.electrons.bse import MdfFile
try:
return MdfFile(mdf_path)
except Exception as exc:
logger.critical("Exception while reading MDF file at %s:\n%s" % (mdf_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(BseTask, self).get_results(**kwargs)
with self.open_mdf() as mdf:
#results["out"].update(mdf.as_dict())
#epsilon_infinity optical_gap
results.register_gridfs_files(MDF=mdf.filepath)
return results
class OpticTask(Task):
"""
Task for the computation of optical spectra with optic i.e.
RPA without local-field effects and velocity operator computed from DDK files.
"""
color_rgb = np.array((255, 204, 102)) / 255
def __init__(self, optic_input, nscf_node, ddk_nodes, workdir=None, manager=None):
"""
        Create an instance of :class:`OpticTask` from a string containing the input.
Args:
optic_input: string with the optic variables (filepaths will be added at run time).
            nscf_node: The NSCF task that will produce the WFK file or string with the path of the WFK file.
            ddk_nodes: List of :class:`DdkTask` nodes that will produce the DDK files or list of DDK filepaths.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
# Convert paths to FileNodes
self.nscf_node = Node.as_node(nscf_node)
self.ddk_nodes = [Node.as_node(n) for n in ddk_nodes]
assert len(ddk_nodes) == 3
#print(self.nscf_node, self.ddk_nodes)
# Use DDK extension instead of 1WF
deps = {n: "1WF" for n in self.ddk_nodes}
#deps = {n: "DDK" for n in self.ddk_nodes}
deps.update({self.nscf_node: "WFK"})
super(OpticTask, self).__init__(optic_input, workdir=workdir, manager=manager, deps=deps)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory of the task."""
super(OpticTask, self).set_workdir(workdir, chroot=chroot)
# Small hack: the log file of optics is actually the main output file.
self.output_file = self.log_file
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
        Optic does not use `get` or `ird` variables, hence we should never try
        to change the input when we connect this task.
"""
kwargs.update(dict(*args))
self.history.info("OpticTask intercepted set_vars with args %s" % kwargs)
if "autoparal" in kwargs: self.input.set_vars(autoparal=kwargs["autoparal"])
if "max_ncpus" in kwargs: self.input.set_vars(max_ncpus=kwargs["max_ncpus"])
@property
def executable(self):
"""Path to the executable required for running the :class:`OpticTask`."""
try:
return self._executable
except AttributeError:
return "optic"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
#optic.in ! Name of input file
#optic.out ! Unused
#optic ! Root name for all files that will be produced
app(self.input_file.path) # Path to the input file
app(os.path.join(self.workdir, "unused")) # Path to the output file
app(os.path.join(self.workdir, self.prefix.odata)) # Prefix for output data
return "\n".join(lines)
@property
def wfk_filepath(self):
"""Returns (at runtime) the absolute path of the WFK file produced by the NSCF run."""
return self.nscf_node.outdir.has_abiext("WFK")
@property
def ddk_filepaths(self):
"""Returns (at runtime) the absolute path of the DDK files produced by the DDK runs."""
return [ddk_task.outdir.has_abiext("1WF") for ddk_task in self.ddk_nodes]
def make_input(self):
"""Construct and write the input file of the calculation."""
# Set the file paths.
all_files ={"ddkfile_"+str(n+1) : ddk for n,ddk in enumerate(self.ddk_filepaths)}
all_files.update({"wfkfile" : self.wfk_filepath})
files_nml = {"FILES" : all_files}
files= nmltostring(files_nml)
# Get the input specified by the user
user_file = nmltostring(self.input.as_dict())
# Join them.
return files + user_file
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
        Optic allows the user to specify the paths of the input files,
        hence we don't need to create symbolic links.
"""
def get_results(self, **kwargs):
results = super(OpticTask, self).get_results(**kwargs)
#results.update(
#"epsilon_infinity":
#))
return results
def fix_abicritical(self):
"""
Cannot fix abicritical errors for optic
"""
return 0
#@check_spectator
def reset_from_scratch(self):
"""
        Restart from scratch. This is to be used if a job is restarted with more resources after a crash.
"""
# Move output files produced in workdir to _reset otherwise check_status continues
# to see the task as crashed even if the job did not run
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file", "mpiabort_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
        General strategy: first try to increase resources in order to fix the problem;
        if this is not possible, call a task-specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
if not self.queue_errors:
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
return
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
for error in self.queue_errors:
logger.info('fixing: %s' % str(error))
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
                    # Ask the qadapter to provide more resources, i.e. more cpus so more total memory.
                    # If the code scales, this should fix the memory problem:
                    # increase both max and min ncpus of the autoparal section and rerun autoparal.
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
return
except ManagerIncreaseError:
logger.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
logger.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
                    msg = ('Memory error detected but the memory could not be increased, neither could the\n'
                           'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
                        logger.warning('increasing the walltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
                            logger.warning('increasing ncpus to speed up the calculation to stay within the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
                    msg = ('Time cancel error detected but the time could not be increased, neither could\n'
                           'the time demand be decreased by speeding up the task or by increasing the number of cpus.\n'
                           'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def autoparal_run(self):
"""
Find an optimal set of parameters for the execution of the Optic task
        This method can change the submission parameters e.g. the number of CPUs for MPI and OpenMP.
Returns 0 if success
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(autoparal_vars.keys())
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
#optconf = self.find_optconf(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
        self.set_status(self.S_INIT, msg='finished autoparal run')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
#os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
class AnaddbTask(Task):
"""Task for Anaddb runs (post-processing of DFPT calculations)."""
color_rgb = np.array((204, 102, 255)) / 255
def __init__(self, anaddb_input, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Create an instance of :class:`AnaddbTask` from a string containing the input.
Args:
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
gkk_node: The node that will produce the GKK file (optional). Accept :class:`Task`, :class:`Work` or filepath.
md_node: The node that will produce the MD file (optional). Accept `Task`, `Work` or filepath.
            ddk_node: The node that will produce the DDK file (optional). Accept `Task`, `Work` or filepath.
workdir: Path to the working directory (optional).
manager: :class:`TaskManager` object (optional).
"""
# Keep a reference to the nodes.
self.ddb_node = Node.as_node(ddb_node)
deps = {self.ddb_node: "DDB"}
self.gkk_node = Node.as_node(gkk_node)
if self.gkk_node is not None:
deps.update({self.gkk_node: "GKK"})
# I never used it!
self.md_node = Node.as_node(md_node)
if self.md_node is not None:
deps.update({self.md_node: "MD"})
self.ddk_node = Node.as_node(ddk_node)
if self.ddk_node is not None:
deps.update({self.ddk_node: "DDK"})
super(AnaddbTask, self).__init__(input=anaddb_input, workdir=workdir, manager=manager, deps=deps)
@classmethod
def temp_shell_task(cls, inp, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
            inp: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
return cls(inp, ddb_node,
gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
workdir=workdir, manager=manager.to_shell_manager(mpi_procs=1))
@property
def executable(self):
"""Path to the executable required for running the :class:`AnaddbTask`."""
try:
return self._executable
except AttributeError:
return "anaddb"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
app(self.input_file.path) # 1) Path of the input file
app(self.output_file.path) # 2) Path of the output file
app(self.ddb_filepath) # 3) Input derivative database e.g. t13.ddb.in
app(self.md_filepath) # 4) Output molecular dynamics e.g. t13.md
app(self.gkk_filepath) # 5) Input elphon matrix elements (GKK file)
app(self.outdir.path_join("out")) # 6) Base name for elphon output files e.g. t13
app(self.ddk_filepath) # 7) File containing ddk filenames for elphon/transport.
return "\n".join(lines)
@property
def ddb_filepath(self):
"""Returns (at runtime) the absolute path of the input DDB file."""
        # This is not very elegant! A possible approach could be to use self.ddb_node.outdir!
if isinstance(self.ddb_node, FileNode): return self.ddb_node.filepath
path = self.ddb_node.outdir.has_abiext("DDB")
return path if path else "DDB_FILE_DOES_NOT_EXIST"
@property
def md_filepath(self):
"""Returns (at runtime) the absolute path of the input MD file."""
if self.md_node is None: return "MD_FILE_DOES_NOT_EXIST"
if isinstance(self.md_node, FileNode): return self.md_node.filepath
path = self.md_node.outdir.has_abiext("MD")
return path if path else "MD_FILE_DOES_NOT_EXIST"
@property
def gkk_filepath(self):
"""Returns (at runtime) the absolute path of the input GKK file."""
if self.gkk_node is None: return "GKK_FILE_DOES_NOT_EXIST"
if isinstance(self.gkk_node, FileNode): return self.gkk_node.filepath
path = self.gkk_node.outdir.has_abiext("GKK")
return path if path else "GKK_FILE_DOES_NOT_EXIST"
@property
def ddk_filepath(self):
"""Returns (at runtime) the absolute path of the input DKK file."""
if self.ddk_node is None: return "DDK_FILE_DOES_NOT_EXIST"
if isinstance(self.ddk_node, FileNode): return self.ddk_node.filepath
path = self.ddk_node.outdir.has_abiext("DDK")
return path if path else "DDK_FILE_DOES_NOT_EXIST"
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
        Anaddb allows the user to specify the paths of the input files,
        hence we don't need to create symbolic links.
"""
def open_phbst(self):
"""Open PHBST file produced by Anaddb and returns :class:`PhbstFile` object."""
from abipy.dfpt.phonons import PhbstFile
phbst_path = os.path.join(self.workdir, "run.abo_PHBST.nc")
        if not os.path.exists(phbst_path):
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhbstFile(phbst_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phbst_path, str(exc)))
return None
def open_phdos(self):
"""Open PHDOS file produced by Anaddb and returns :class:`PhdosFile` object."""
from abipy.dfpt.phonons import PhdosFile
phdos_path = os.path.join(self.workdir, "run.abo_PHDOS.nc")
        if not os.path.exists(phdos_path):
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhdosFile(phdos_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phdos_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(AnaddbTask, self).get_results(**kwargs)
return results
| mit |
tmeits/pybrain | pybrain/auxiliary/gaussprocess.py | 25 | 9240 | from __future__ import print_function
__author__ = 'Thomas Rueckstiess, [email protected]; Christian Osendorfer, [email protected]'
from scipy import r_, exp, zeros, eye, array, asarray, random, ravel, diag, sqrt, sin, cos, sort, mgrid, dot, floor
from scipy import c_ #@UnusedImport
from scipy.linalg import solve, inv
from pybrain.datasets import SupervisedDataSet
from scipy.linalg import norm
class GaussianProcess:
""" This class represents a basic n-dimensional Gaussian Process. The implementation
follows the book 'Gaussian Processes for Machine Learning' by Carl E. Rasmussen
(an online version is available at: http://www.gaussianprocess.org/gpml/chapters/).
    The hyperparameters of the GP can be adjusted by setting the self.hyper variable,
    which must be a tuple of size 3.
"""
def __init__(self, indim, start=0, stop=1, step=0.1):
""" initializes the gaussian process object.
:arg indim: input dimension
:key start: start of interval for sampling the GP.
:key stop: stop of interval for sampling the GP.
:key step: stepsize for sampling interval.
:note: start, stop, step can either be scalars or tuples of size 'indim'.
"""
self.mean = 0
self.start = start
self.stop = stop
self.step = step
self.indim = indim
self.trainx = zeros((0, indim), float)
self.trainy = zeros((0), float)
self.noise = zeros((0), float)
self.testx = self._buildGrid()
self.calculated = True
self.pred_mean = zeros(len(self.testx))
self.pred_cov = eye(len(self.testx))
self.autonoise = False
self.hyper = (0.5, 2.0, 0.1)
def _kernel(self, a, b):
""" kernel function, here RBF kernel """
(l, sigma_f, _sigma_n) = self.hyper
r = sigma_f ** 2 * exp(-1.0 / (2 * l ** 2) * norm(a - b, 2) ** 2)
# if a == b:
# r += sigma_n**2
return r
def _buildGrid(self):
        """ returns a mgrid type of array for 'dim' dimensions """
        (start, stop, step) = (self.start, self.stop, self.step)
if isinstance(start, (int, float, complex)):
dimstr = 'start:stop:step, '*self.indim
else:
assert len(start) == len(stop) == len(step)
dimstr = ["start[%i]:stop[%i]:step[%i], " % (i, i, i) for i in range(len(start))]
dimstr = ''.join(dimstr)
        return eval('c_[list(map(ravel, mgrid[' + dimstr + ']))]').T
def _buildCov(self, a, b):
K = zeros((len(a), len(b)), float)
for i in range(len(a)):
for j in range(len(b)):
K[i, j] = self._kernel(a[i, :], b[j, :])
return K
def reset(self):
self.trainx = zeros((0, self.indim), float)
self.trainy = zeros((0), float)
self.noise = zeros((0), float)
self.pred_mean = zeros(len(self.testx))
self.pred_cov = eye(len(self.testx))
def trainOnDataset(self, dataset):
""" takes a SequentialDataSet with indim input dimension and scalar target """
assert (dataset.getDimension('input') == self.indim)
assert (dataset.getDimension('target') == 1)
self.trainx = dataset.getField('input')
self.trainy = ravel(dataset.getField('target'))
self.noise = array([0.001] * len(self.trainx))
# print(self.trainx, self.trainy)
self.calculated = False
def addDataset(self, dataset):
""" adds the points from the dataset to the training set """
assert (dataset.getDimension('input') == self.indim)
assert (dataset.getDimension('target') == 1)
self.trainx = r_[self.trainx, dataset.getField('input')]
self.trainy = r_[self.trainy, ravel(dataset.getField('target'))]
self.noise = array([0.001] * len(self.trainx))
self.calculated = False
def addSample(self, train, target):
self.trainx = r_[self.trainx, asarray([train])]
self.trainy = r_[self.trainy, asarray(target)]
self.noise = r_[self.noise, array([0.001])]
self.calculated = False
def testOnArray(self, arr):
self.testx = arr
self._calculate()
return self.pred_mean
def _calculate(self):
        # calculate only if necessary
if len(self.trainx) == 0:
return
# build covariance matrices
train_train = self._buildCov(self.trainx, self.trainx)
train_test = self._buildCov(self.trainx, self.testx)
test_train = train_test.T
test_test = self._buildCov(self.testx, self.testx)
# calculate predictive mean and covariance
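        # For reference (standard GP posterior equations, matching the two
        # statements computed below with K = K_train + noise*I):
        #   pred_mean = m + K_*^T K^{-1} (y - m)
        #   pred_cov  = K_** - K_*^T K^{-1} K_*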
K = train_train + self.noise * eye(len(self.trainx))
if self.autonoise:
# calculate average neighboring distance for auto-noise
avgdist = 0
sort_trainx = sort(self.trainx)
for i, d in enumerate(sort_trainx):
if i == 0:
continue
avgdist += d - sort_trainx[i - 1]
avgdist /= len(sort_trainx) - 1
# sort(self.trainx)
# add auto-noise from neighbouring samples (not standard gp)
for i in range(len(self.trainx)):
for j in range(len(self.trainx)):
if norm(self.trainx[i] - self.trainx[j]) > avgdist:
continue
d = norm(self.trainy[i] - self.trainy[j]) / (exp(norm(self.trainx[i] - self.trainx[j])))
K[i, i] += d
self.pred_mean = self.mean + dot(test_train, solve(K, self.trainy - self.mean, sym_pos=0))
self.pred_cov = test_test - dot(test_train, dot(inv(K), train_test))
self.calculated = True
def draw(self):
if not self.calculated:
self._calculate()
return self.pred_mean + random.multivariate_normal(zeros(len(self.testx)), self.pred_cov)
def plotCurves(self, showSamples=False, force2D=True):
from pylab import clf, hold, plot, fill, title, gcf, pcolor, gray
if not self.calculated:
self._calculate()
if self.indim == 1:
clf()
hold(True)
if showSamples:
# plot samples (gray)
for _ in range(5):
plot(self.testx, self.pred_mean + random.multivariate_normal(zeros(len(self.testx)), self.pred_cov), color='gray')
# plot training set
plot(self.trainx, self.trainy, 'bx')
# plot mean (blue)
plot(self.testx, self.pred_mean, 'b', linewidth=1)
# plot variance (as "polygon" going from left to right for upper half and back for lower half)
fillx = r_[ravel(self.testx), ravel(self.testx[::-1])]
filly = r_[self.pred_mean + 2 * diag(self.pred_cov), self.pred_mean[::-1] - 2 * diag(self.pred_cov)[::-1]]
fill(fillx, filly, facecolor='gray', edgecolor='white', alpha=0.3)
title('1D Gaussian Process with mean and variance')
elif self.indim == 2 and not force2D:
from matplotlib import axes3d as a3
fig = gcf()
fig.clear()
ax = a3.Axes3D(fig) #@UndefinedVariable
# plot training set
ax.plot3D(ravel(self.trainx[:, 0]), ravel(self.trainx[:, 1]), ravel(self.trainy), 'ro')
# plot mean
            (x, y, z) = [m.reshape(int(sqrt(len(m))), int(sqrt(len(m)))) for m in (self.testx[:, 0], self.testx[:, 1], self.pred_mean)]
ax.plot_wireframe(x, y, z, colors='gray')
return ax
elif self.indim == 2 and force2D:
# plot mean on pcolor map
gray()
# (x, y, z) = map(lambda m: m.reshape(sqrt(len(m)), sqrt(len(m))), (self.testx[:,0], self.testx[:,1], self.pred_mean))
            m = int(floor(sqrt(len(self.pred_mean))))
pcolor(self.pred_mean.reshape(m, m)[::-1, :])
else: print("plotting only supported for indim=1 or indim=2.")
if __name__ == '__main__':
from pylab import figure, show
# --- example on how to use the GP in 1 dimension
ds = SupervisedDataSet(1, 1)
gp = GaussianProcess(indim=1, start= -3, stop=3, step=0.05)
figure()
x = mgrid[-3:3:0.2]
y = 0.1 * x ** 2 + x + 1
z = sin(x) + 0.5 * cos(y)
ds.addSample(-2.5, -1)
ds.addSample(-1.0, 3)
gp.mean = 0
# new feature "autonoise" adds uncertainty to data depending on
# it's distance to other points in the dataset. not tested much yet.
# gp.autonoise = True
gp.trainOnDataset(ds)
gp.plotCurves(showSamples=True)
# you can also test the gp on single points, but this deletes the
# original testing grid. it can be restored with a call to _buildGrid()
print((gp.testOnArray(array([[0.4]]))))
# --- example on how to use the GP in 2 dimensions
ds = SupervisedDataSet(2, 1)
gp = GaussianProcess(indim=2, start=0, stop=5, step=0.2)
figure()
x, y = mgrid[0:5:4j, 0:5:4j]
z = cos(x) * sin(y)
(x, y, z) = list(map(ravel, [x, y, z]))
for i, j, k in zip(x, y, z):
ds.addSample([i, j], [k])
gp.trainOnDataset(ds)
gp.plotCurves()
show()
| bsd-3-clause |
cosmoharrigan/pylearn2 | pylearn2/gui/tangent_plot.py | 44 | 1730 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
from theano.compat.six.moves import xrange
def tangent_plot(x, y, s):
"""
Plots a curve with tangent lines.
Parameters
----------
x : list
List of x coordinates.
Assumed to be sorted into ascending order, so that the tangent
lines occupy 80 percent of the horizontal space between each pair
of points.
y : list
List of y coordinates
s : list
List of slopes
"""
assert isinstance(x, list)
assert isinstance(y, list)
assert isinstance(s, list)
n = len(x)
assert len(y) == n
assert len(s) == n
if pyplot is None:
raise RuntimeError("Could not import pyplot, can't run this code.")
pyplot.plot(x, y, color='b')
if n == 0:
pyplot.show()
return
pyplot.hold(True)
# Add dummy entries so that the for loop can use the same code on every
# entry
if n == 1:
        x = [x[0] - 1.] + x + [x[0] + 1.]
else:
x = [x[0] - (x[1] - x[0])] + x + [x[-2] + (x[-1] - x[-2])]
y = [0.] + y + [0]
s = [0.] + s + [0]
for i in xrange(1, n + 1):
ld = 0.4 * (x[i] - x[i - 1])
lx = x[i] - ld
ly = y[i] - ld * s[i]
rd = 0.4 * (x[i + 1] - x[i])
rx = x[i] + rd
ry = y[i] + rd * s[i]
pyplot.plot([lx, rx], [ly, ry], color='g')
pyplot.show()
if __name__ == "__main__":
# Demo by plotting a quadratic function
import numpy as np
x = np.arange(-5., 5., .1)
y = 0.5 * (x ** 2)
x = list(x)
y = list(y)
tangent_plot(x, y, x)
| bsd-3-clause |
dremio/arrow | integration/integration_test.py | 4 | 33972 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import argparse
import binascii
import glob
import itertools
import json
import os
import random
import six
import string
import subprocess
import tempfile
import uuid
import errno
import numpy as np
ARROW_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
# Control for flakiness
np.random.seed(12345)
def load_version_from_pom():
import xml.etree.ElementTree as ET
tree = ET.parse(os.path.join(ARROW_HOME, 'java', 'pom.xml'))
tag_pattern = '{http://maven.apache.org/POM/4.0.0}version'
version_tag = list(tree.getroot().findall(tag_pattern))[0]
return version_tag.text
def guid():
return uuid.uuid4().hex
# from pandas
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
def rands(nchars):
"""
    Generate one random string of `nchars` characters.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def tobytes(o):
if isinstance(o, six.text_type):
return o.encode('utf8')
return o
def frombytes(o):
if isinstance(o, six.binary_type):
return o.decode('utf8')
return o
# from the merge_arrow_pr.py script
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print('Command failed: %s' % ' '.join(cmd))
print('With output:')
print('--------------')
print(frombytes(e.output))
print('--------------')
raise e
return frombytes(output)
# ----------------------------------------------------------------------
# Data generation
class DataType(object):
def __init__(self, name, nullable=True):
self.name = name
self.nullable = nullable
def get_json(self):
return OrderedDict([
('name', self.name),
('type', self._get_type()),
('nullable', self.nullable),
('children', self._get_children())
])
def _make_is_valid(self, size):
if self.nullable:
return np.random.randint(0, 2, size=size)
else:
return np.ones(size)
class Column(object):
def __init__(self, name, count):
self.name = name
self.count = count
def __len__(self):
return self.count
def _get_children(self):
return []
def _get_buffers(self):
return []
def get_json(self):
entries = [
('name', self.name),
('count', self.count)
]
buffers = self._get_buffers()
entries.extend(buffers)
children = self._get_children()
if len(children) > 0:
entries.append(('children', children))
return OrderedDict(entries)
class PrimitiveType(DataType):
def _get_children(self):
return []
class PrimitiveColumn(Column):
def __init__(self, name, count, is_valid, values):
super(PrimitiveColumn, self).__init__(name, count)
self.is_valid = is_valid
self.values = values
def _encode_value(self, x):
return x
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('DATA', list([self._encode_value(x) for x in self.values]))
]
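    # Worked example (illustrative only): a PrimitiveColumn with
    # is_valid = [1, 0, 1] and values = [7, 0, 9] serializes its buffers as
    #   [('VALIDITY', [1, 0, 1]), ('DATA', [7, 0, 9])]
    # which Column.get_json() then merges into the per-column JSON entry.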
TEST_INT_MAX = 2 ** 31 - 1
TEST_INT_MIN = ~TEST_INT_MAX
class IntegerType(PrimitiveType):
def __init__(self, name, is_signed, bit_width, nullable=True,
min_value=TEST_INT_MIN,
max_value=TEST_INT_MAX):
super(IntegerType, self).__init__(name, nullable=nullable)
self.is_signed = is_signed
self.bit_width = bit_width
self.min_value = min_value
self.max_value = max_value
def _get_generated_data_bounds(self):
signed_iinfo = np.iinfo('int' + str(self.bit_width))
if self.is_signed:
min_value, max_value = signed_iinfo.min, signed_iinfo.max
else:
# ARROW-1837 Remove this hack and restore full unsigned integer
# range
min_value, max_value = 0, signed_iinfo.max
lower_bound = max(min_value, self.min_value)
upper_bound = min(max_value, self.max_value)
return lower_bound, upper_bound
def _get_type(self):
return OrderedDict([
('name', 'int'),
('isSigned', self.is_signed),
('bitWidth', self.bit_width)
])
def generate_column(self, size, name=None):
lower_bound, upper_bound = self._get_generated_data_bounds()
return self.generate_range(size, lower_bound, upper_bound, name=name)
def generate_range(self, size, lower, upper, name=None):
values = [int(x) for x in
np.random.randint(lower, upper, size=size)]
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
class DateType(IntegerType):
DAY = 0
MILLISECOND = 1
# 1/1/1 to 12/31/9999
_ranges = {
DAY: [-719162, 2932896],
MILLISECOND: [-62135596800000, 253402214400000]
}
def __init__(self, name, unit, nullable=True):
bit_width = 32 if unit == self.DAY else 64
min_value, max_value = self._ranges[unit]
super(DateType, self).__init__(
name, True, bit_width, nullable=nullable,
min_value=min_value, max_value=max_value
)
self.unit = unit
def _get_type(self):
return OrderedDict([
('name', 'date'),
('unit', 'DAY' if self.unit == self.DAY else 'MILLISECOND')
])
TIMEUNIT_NAMES = {
's': 'SECOND',
'ms': 'MILLISECOND',
'us': 'MICROSECOND',
'ns': 'NANOSECOND'
}
class TimeType(IntegerType):
BIT_WIDTHS = {
's': 32,
'ms': 32,
'us': 64,
'ns': 64
}
_ranges = {
's': [0, 86400],
'ms': [0, 86400000],
'us': [0, 86400000000],
'ns': [0, 86400000000000]
}
def __init__(self, name, unit='s', nullable=True):
min_val, max_val = self._ranges[unit]
super(TimeType, self).__init__(name, True, self.BIT_WIDTHS[unit],
nullable=nullable,
min_value=min_val,
max_value=max_val)
self.unit = unit
def _get_type(self):
return OrderedDict([
('name', 'time'),
('unit', TIMEUNIT_NAMES[self.unit]),
('bitWidth', self.bit_width)
])
class TimestampType(IntegerType):
# 1/1/1 to 12/31/9999
_ranges = {
's': [-62135596800, 253402214400],
'ms': [-62135596800000, 253402214400000],
'us': [-62135596800000000, 253402214400000000],
# Physical range for int64, ~584 years and change
'ns': [np.iinfo('int64').min, np.iinfo('int64').max]
}
def __init__(self, name, unit='s', tz=None, nullable=True):
min_val, max_val = self._ranges[unit]
super(TimestampType, self).__init__(name, True, 64, nullable=nullable,
min_value=min_val,
max_value=max_val)
self.unit = unit
self.tz = tz
def _get_type(self):
fields = [
('name', 'timestamp'),
('unit', TIMEUNIT_NAMES[self.unit])
]
if self.tz is not None:
fields.append(('timezone', self.tz))
return OrderedDict(fields)
class FloatingPointType(PrimitiveType):
def __init__(self, name, bit_width, nullable=True):
super(FloatingPointType, self).__init__(name, nullable=nullable)
self.bit_width = bit_width
self.precision = {
16: 'HALF',
32: 'SINGLE',
64: 'DOUBLE'
}[self.bit_width]
@property
def numpy_type(self):
return 'float' + str(self.bit_width)
def _get_type(self):
return OrderedDict([
('name', 'floatingpoint'),
('precision', self.precision)
])
def generate_column(self, size, name=None):
values = np.random.randn(size) * 1000
values = np.round(values, 3)
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
DECIMAL_PRECISION_TO_VALUE = {
key: (1 << (8 * i - 1)) - 1 for i, key in enumerate(
[1, 3, 5, 7, 10, 12, 15, 17, 19, 22, 24, 27, 29, 32, 34, 36],
start=1,
)
}
def decimal_range_from_precision(precision):
assert 1 <= precision <= 38
try:
max_value = DECIMAL_PRECISION_TO_VALUE[precision]
except KeyError:
return decimal_range_from_precision(precision - 1)
else:
return ~max_value, max_value
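# Worked example (illustrative): DECIMAL_PRECISION_TO_VALUE maps 3 -> 2**15 - 1,
# so decimal_range_from_precision(3) == (-32768, 32767), while a precision that
# is not a key (e.g. 4) falls back to the next smaller key via the recursive call.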
class DecimalType(PrimitiveType):
def __init__(self, name, precision, scale, bit_width=128, nullable=True):
super(DecimalType, self).__init__(name, nullable=True)
self.precision = precision
self.scale = scale
self.bit_width = bit_width
@property
def numpy_type(self):
return object
def _get_type(self):
return OrderedDict([
('name', 'decimal'),
('precision', self.precision),
('scale', self.scale),
])
def generate_column(self, size, name=None):
min_value, max_value = decimal_range_from_precision(self.precision)
values = [random.randint(min_value, max_value) for _ in range(size)]
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return DecimalColumn(name, size, is_valid, values, self.bit_width)
class DecimalColumn(PrimitiveColumn):
def __init__(self, name, count, is_valid, values, bit_width=128):
super(DecimalColumn, self).__init__(name, count, is_valid, values)
self.bit_width = bit_width
def _encode_value(self, x):
return str(x)
class BooleanType(PrimitiveType):
bit_width = 1
def _get_type(self):
return OrderedDict([('name', 'bool')])
@property
def numpy_type(self):
return 'bool'
def generate_column(self, size, name=None):
values = list(map(bool, np.random.randint(0, 2, size=size)))
is_valid = self._make_is_valid(size)
if name is None:
name = self.name
return PrimitiveColumn(name, size, is_valid, values)
class BinaryType(PrimitiveType):
@property
def numpy_type(self):
return object
@property
def column_class(self):
return BinaryColumn
def _get_type(self):
return OrderedDict([('name', 'binary')])
def generate_column(self, size, name=None):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
draw = (np.random.randint(0, 255, size=K)
.astype(np.uint8)
.tostring())
values.append(draw)
else:
values.append(b"")
if name is None:
name = self.name
return self.column_class(name, size, is_valid, values)
class FixedSizeBinaryType(PrimitiveType):
def __init__(self, name, byte_width, nullable=True):
super(FixedSizeBinaryType, self).__init__(name, nullable=nullable)
self.byte_width = byte_width
@property
def numpy_type(self):
return object
@property
def column_class(self):
return FixedSizeBinaryColumn
def _get_type(self):
return OrderedDict([('name', 'fixedsizebinary'), ('byteWidth', self.byte_width)])
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)]),
OrderedDict([('type', 'DATA'),
('typeBitWidth', self.byte_width)])])])
def generate_column(self, size, name=None):
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
draw = (np.random.randint(0, 255, size=self.byte_width)
.astype(np.uint8)
.tostring())
values.append(draw)
if name is None:
name = self.name
return self.column_class(name, size, is_valid, values)
class StringType(BinaryType):
@property
def column_class(self):
return StringColumn
def _get_type(self):
return OrderedDict([('name', 'utf8')])
def generate_column(self, size, name=None):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
values.append(tobytes(rands(K)))
else:
values.append(b"")
if name is None:
name = self.name
return self.column_class(name, size, is_valid, values)
class JsonSchema(object):
def __init__(self, fields):
self.fields = fields
def get_json(self):
return OrderedDict([
('fields', [field.get_json() for field in self.fields])
])
class BinaryColumn(PrimitiveColumn):
def _encode_value(self, x):
return frombytes(binascii.hexlify(x).upper())
def _get_buffers(self):
offset = 0
offsets = [0]
data = []
for i, v in enumerate(self.values):
if self.is_valid[i]:
offset += len(v)
else:
v = b""
offsets.append(offset)
data.append(self._encode_value(v))
return [
('VALIDITY', [int(x) for x in self.is_valid]),
('OFFSET', offsets),
('DATA', data)
]
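    # Worked example (illustrative only): values = [b"ab", b"", b"cde"] with
    # is_valid = [1, 0, 1] give OFFSET = [0, 2, 2, 5] and
    # DATA = ['6162', '', '636465'] (upper-case hex from _encode_value).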
class FixedSizeBinaryColumn(PrimitiveColumn):
def _encode_value(self, x):
return ''.join('{:02x}'.format(c).upper() for c in x)
def _get_buffers(self):
data = []
for i, v in enumerate(self.values):
data.append(self._encode_value(v))
return [
('VALIDITY', [int(x) for x in self.is_valid]),
('DATA', data)
]
class StringColumn(BinaryColumn):
def _encode_value(self, x):
return frombytes(x)
class ListType(DataType):
def __init__(self, name, value_type, nullable=True):
super(ListType, self).__init__(name, nullable=nullable)
self.value_type = value_type
def _get_type(self):
return OrderedDict([
('name', 'list')
])
def _get_children(self):
return [self.value_type.get_json()]
def generate_column(self, size, name=None):
MAX_LIST_SIZE = 4
is_valid = self._make_is_valid(size)
list_sizes = np.random.randint(0, MAX_LIST_SIZE + 1, size=size)
offsets = [0]
offset = 0
for i in range(size):
if is_valid[i]:
offset += int(list_sizes[i])
offsets.append(offset)
# The offset now is the total number of elements in the child array
values = self.value_type.generate_column(offset)
if name is None:
name = self.name
return ListColumn(name, size, is_valid, offsets, values)
class ListColumn(Column):
def __init__(self, name, count, is_valid, offsets, values):
super(ListColumn, self).__init__(name, count)
self.is_valid = is_valid
self.offsets = offsets
self.values = values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('OFFSET', list(self.offsets))
]
def _get_children(self):
return [self.values.get_json()]
class StructType(DataType):
def __init__(self, name, field_types, nullable=True):
super(StructType, self).__init__(name, nullable=nullable)
self.field_types = field_types
def _get_type(self):
return OrderedDict([
('name', 'struct')
])
def _get_children(self):
return [type_.get_json() for type_ in self.field_types]
def generate_column(self, size, name=None):
is_valid = self._make_is_valid(size)
field_values = [type_.generate_column(size)
for type_ in self.field_types]
if name is None:
name = self.name
return StructColumn(name, size, is_valid, field_values)
class Dictionary(object):
def __init__(self, id_, field, values, ordered=False):
self.id_ = id_
self.field = field
self.values = values
self.ordered = ordered
def __len__(self):
return len(self.values)
def get_json(self):
dummy_batch = JsonRecordBatch(len(self.values), [self.values])
return OrderedDict([
('id', self.id_),
('data', dummy_batch.get_json())
])
class DictionaryType(DataType):
def __init__(self, name, index_type, dictionary, nullable=True):
super(DictionaryType, self).__init__(name, nullable=nullable)
assert isinstance(index_type, IntegerType)
assert isinstance(dictionary, Dictionary)
self.index_type = index_type
self.dictionary = dictionary
def get_json(self):
dict_field = self.dictionary.field
return OrderedDict([
('name', self.name),
('type', dict_field._get_type()),
('nullable', self.nullable),
('children', dict_field._get_children()),
('dictionary', OrderedDict([
('id', self.dictionary.id_),
('indexType', self.index_type._get_type()),
('isOrdered', self.dictionary.ordered)
]))
])
def generate_column(self, size, name=None):
if name is None:
name = self.name
return self.index_type.generate_range(size, 0, len(self.dictionary),
name=name)
class StructColumn(Column):
def __init__(self, name, count, is_valid, field_values):
super(StructColumn, self).__init__(name, count)
self.is_valid = is_valid
self.field_values = field_values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid])
]
def _get_children(self):
return [field.get_json() for field in self.field_values]
class JsonRecordBatch(object):
def __init__(self, count, columns):
self.count = count
self.columns = columns
def get_json(self):
return OrderedDict([
('count', self.count),
('columns', [col.get_json() for col in self.columns])
])
class JsonFile(object):
def __init__(self, name, schema, batches, dictionaries=None):
self.name = name
self.schema = schema
self.dictionaries = dictionaries or []
self.batches = batches
def get_json(self):
entries = [
('schema', self.schema.get_json())
]
if len(self.dictionaries) > 0:
entries.append(('dictionaries',
[dictionary.get_json()
for dictionary in self.dictionaries]))
entries.append(('batches', [batch.get_json()
for batch in self.batches]))
return OrderedDict(entries)
def write(self, path):
with open(path, 'wb') as f:
f.write(json.dumps(self.get_json(), indent=2).encode('utf-8'))
def get_field(name, type_, nullable=True):
if type_ == 'binary':
return BinaryType(name, nullable=nullable)
elif type_ == 'utf8':
return StringType(name, nullable=nullable)
elif type_.startswith('fixedsizebinary_'):
byte_width = int(type_.split('_')[1])
return FixedSizeBinaryType(name, byte_width=byte_width, nullable=nullable)
dtype = np.dtype(type_)
if dtype.kind in ('i', 'u'):
return IntegerType(name, dtype.kind == 'i', dtype.itemsize * 8,
nullable=nullable)
elif dtype.kind == 'f':
return FloatingPointType(name, dtype.itemsize * 8,
nullable=nullable)
elif dtype.kind == 'b':
return BooleanType(name, nullable=nullable)
else:
raise TypeError(dtype)
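# Note (editor comment): builds a JsonFile with one record batch per requested
# batch size, generating a column for every field in each batch.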
def _generate_file(name, fields, batch_sizes, dictionaries=None):
schema = JsonSchema(fields)
batches = []
for size in batch_sizes:
columns = []
for field in fields:
col = field.generate_column(size)
columns.append(col)
batches.append(JsonRecordBatch(size, columns))
return JsonFile(name, schema, batches, dictionaries)
def generate_primitive_case(batch_sizes, name='primitive'):
types = ['bool', 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64', 'binary', 'utf8',
'fixedsizebinary_19', 'fixedsizebinary_120']
fields = []
for type_ in types:
fields.append(get_field(type_ + "_nullable", type_, True))
fields.append(get_field(type_ + "_nonnullable", type_, False))
return _generate_file(name, fields, batch_sizes)
def generate_decimal_case():
fields = [
DecimalType(name='f{}'.format(i), precision=precision, scale=2)
for i, precision in enumerate(range(3, 39))
]
possible_batch_sizes = 7, 10
batch_sizes = [possible_batch_sizes[i % 2] for i in range(len(fields))]
return _generate_file('decimal', fields, batch_sizes)
def generate_datetime_case():
fields = [
DateType('f0', DateType.DAY),
DateType('f1', DateType.MILLISECOND),
TimeType('f2', 's'),
TimeType('f3', 'ms'),
TimeType('f4', 'us'),
TimeType('f5', 'ns'),
TimestampType('f6', 's'),
TimestampType('f7', 'ms'),
TimestampType('f8', 'us'),
TimestampType('f9', 'ns'),
TimestampType('f10', 'ms', tz=None),
TimestampType('f11', 's', tz='UTC'),
TimestampType('f12', 'ms', tz='US/Eastern'),
TimestampType('f13', 'us', tz='Europe/Paris'),
TimestampType('f14', 'ns', tz='US/Pacific')
]
batch_sizes = [7, 10]
return _generate_file("datetime", fields, batch_sizes)
def generate_nested_case():
fields = [
ListType('list_nullable', get_field('item', 'int32')),
StructType('struct_nullable', [get_field('f1', 'int32'),
get_field('f2', 'utf8')]),
# TODO(wesm): this causes segfault
# ListType('list_nonnullable', get_field('item', 'int32'), False),
]
batch_sizes = [7, 10]
return _generate_file("nested", fields, batch_sizes)
def generate_dictionary_case():
dict_type1 = StringType('dictionary1')
dict_type2 = get_field('dictionary2', 'int64')
dict1 = Dictionary(0, dict_type1,
dict_type1.generate_column(10, name='DICT0'))
dict2 = Dictionary(1, dict_type2,
dict_type2.generate_column(50, name='DICT1'))
fields = [
DictionaryType('dict1_0', get_field('', 'int8'), dict1),
DictionaryType('dict1_1', get_field('', 'int32'), dict1),
DictionaryType('dict2_0', get_field('', 'int16'), dict2)
]
batch_sizes = [7, 10]
return _generate_file("dictionary", fields, batch_sizes,
dictionaries=[dict1, dict2])
def get_generated_json_files():
temp_dir = tempfile.mkdtemp()
def _temp_path():
return
file_objs = [
generate_primitive_case([17, 20], name='primitive'),
generate_primitive_case([0, 0, 0], name='primitive_zerolength'),
generate_decimal_case(),
generate_datetime_case(),
generate_nested_case(),
generate_dictionary_case()
]
generated_paths = []
for file_obj in file_objs:
out_path = os.path.join(temp_dir, 'generated_' +
file_obj.name + '.json')
file_obj.write(out_path)
generated_paths.append(out_path)
return generated_paths
# ----------------------------------------------------------------------
# Testing harness
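# For every (producer, consumer) pair, the producer converts JSON to an Arrow
# file and then to a stream; the consumer validates both the file and the
# round-tripped stream against the original JSON.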
class IntegrationRunner(object):
def __init__(self, json_files, testers, debug=False):
self.json_files = json_files
self.testers = testers
self.temp_dir = tempfile.mkdtemp()
self.debug = debug
def run(self):
        for producer, consumer in itertools.product(
                filter(lambda t: t.PRODUCER, self.testers),
                filter(lambda t: t.CONSUMER, self.testers)):
self._compare_implementations(producer, consumer)
def _compare_implementations(self, producer, consumer):
print('##########################################################')
print(
'{0} producing, {1} consuming'.format(producer.name, consumer.name)
)
print('##########################################################')
for json_path in self.json_files:
print('==========================================================')
print('Testing file {0}'.format(json_path))
print('==========================================================')
name = os.path.splitext(os.path.basename(json_path))[0]
# Make the random access file
print('-- Creating binary inputs')
producer_file_path = os.path.join(self.temp_dir, guid() + '_' +
name + '.json_to_arrow')
producer.json_to_file(json_path, producer_file_path)
# Validate the file
print('-- Validating file')
consumer.validate(json_path, producer_file_path)
print('-- Validating stream')
producer_stream_path = os.path.join(self.temp_dir, guid() + '_' +
name + '.arrow_to_stream')
consumer_file_path = os.path.join(self.temp_dir, guid() + '_' +
name + '.stream_to_arrow')
producer.file_to_stream(producer_file_path,
producer_stream_path)
consumer.stream_to_file(producer_stream_path,
consumer_file_path)
consumer.validate(json_path, consumer_file_path)
class Tester(object):
PRODUCER = False
CONSUMER = False
def __init__(self, debug=False):
self.debug = debug
def json_to_file(self, json_path, arrow_path):
raise NotImplementedError
def stream_to_file(self, stream_path, file_path):
raise NotImplementedError
def file_to_stream(self, file_path, stream_path):
raise NotImplementedError
def validate(self, json_path, arrow_path):
raise NotImplementedError
class JavaTester(Tester):
PRODUCER = True
CONSUMER = True
_arrow_version = load_version_from_pom()
ARROW_TOOLS_JAR = os.environ.get(
'ARROW_JAVA_INTEGRATION_JAR',
os.path.join(ARROW_HOME,
'java/tools/target/arrow-tools-{}-'
'jar-with-dependencies.jar'.format(_arrow_version)))
name = 'Java'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.Integration']
if arrow_path is not None:
cmd.extend(['-a', arrow_path])
if json_path is not None:
cmd.extend(['-j', json_path])
cmd.extend(['-c', command])
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.StreamToFile',
stream_path, file_path]
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.FileToStream',
file_path, stream_path]
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
class CPPTester(Tester):
PRODUCER = True
CONSUMER = True
EXE_PATH = os.environ.get(
'ARROW_CPP_EXE_PATH',
os.path.join(ARROW_HOME, 'cpp/build/debug'))
CPP_INTEGRATION_EXE = os.path.join(EXE_PATH, 'json-integration-test')
STREAM_TO_FILE = os.path.join(EXE_PATH, 'stream-to-file')
FILE_TO_STREAM = os.path.join(EXE_PATH, 'file-to-stream')
name = 'C++'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = [self.CPP_INTEGRATION_EXE, '--integration']
if arrow_path is not None:
cmd.append('--arrow=' + arrow_path)
if json_path is not None:
cmd.append('--json=' + json_path)
cmd.append('--mode=' + command)
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
class JSTester(Tester):
PRODUCER = False
CONSUMER = True
INTEGRATION_EXE = os.path.join(ARROW_HOME, 'js/bin/integration.js')
name = 'JS'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = [self.INTEGRATION_EXE]
if arrow_path is not None:
cmd.extend(['-a', arrow_path])
if json_path is not None:
cmd.extend(['-j', json_path])
cmd.extend(['--mode', command])
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def stream_to_file(self, stream_path, file_path):
# Just copy stream to file, we can read the stream directly
cmd = ['cp', stream_path, file_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
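# The static JSON files checked in under integration/data are combined with the
# generated ones in run_all_tests() below.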
def get_static_json_files():
glob_pattern = os.path.join(ARROW_HOME, 'integration', 'data', '*.json')
return glob.glob(glob_pattern)
def run_all_tests(debug=False):
testers = [CPPTester(debug=debug), JavaTester(debug=debug), JSTester(debug=debug)]
static_json_files = get_static_json_files()
generated_json_files = get_generated_json_files()
json_files = static_json_files + generated_json_files
runner = IntegrationRunner(json_files, testers, debug=debug)
runner.run()
print('-- All tests passed!')
def write_js_test_json(directory):
generate_nested_case().write(os.path.join(directory, 'nested.json'))
generate_decimal_case().write(os.path.join(directory, 'decimal.json'))
generate_datetime_case().write(os.path.join(directory, 'datetime.json'))
(generate_dictionary_case()
.write(os.path.join(directory, 'dictionary.json')))
(generate_primitive_case([7, 10])
.write(os.path.join(directory, 'primitive.json')))
(generate_primitive_case([0, 0, 0])
.write(os.path.join(directory, 'primitive-empty.json')))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Arrow integration test CLI')
parser.add_argument('--write_generated_json', dest='generated_json_path',
action='store', default=False,
help='Generate test JSON')
parser.add_argument('--debug', dest='debug', action='store_true',
default=False,
help='Run executables in debug mode as relevant')
args = parser.parse_args()
if args.generated_json_path:
try:
os.makedirs(args.generated_json_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
write_js_test_json(args.generated_json_path)
else:
run_all_tests(debug=args.debug)
| apache-2.0 |
harisbal/pandas | pandas/tests/series/test_missing.py | 1 | 51950 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from distutils.version import LooseVersion
import numpy as np
from numpy import nan
import pytest
import pytz
from pandas._libs.tslib import iNaT
from pandas.compat import range
from pandas.errors import PerformanceWarning
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, IntervalIndex, MultiIndex, NaT, Series,
Timestamp, date_range, isna)
from pandas.core.series import remove_na
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
try:
import scipy
_is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
LooseVersion('0.19.0'))
except ImportError:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestSeriesMissingData():
def test_remove_na_deprecation(self):
# see gh-16971
with tm.assert_produces_warning(FutureWarning):
remove_na(Series([]))
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series([NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, {0}]'.format(tz)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# with timezone
# GH 15855
df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='pad'), exp)
df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='bfill'), exp)
def test_fillna_consistency(self):
# GH 16402
# fillna with a tz aware to a tz-naive, should result in object
s = Series([Timestamp('20130101'), pd.NaT])
result = s.fillna(Timestamp('20130101', tz='US/Eastern'))
expected = Series([Timestamp('20130101'),
Timestamp('2013-01-01', tz='US/Eastern')],
dtype='object')
assert_series_equal(result, expected)
# where (we ignore the errors=)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
errors='ignore')
assert_series_equal(result, expected)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
errors='ignore')
assert_series_equal(result, expected)
# with a non-datetime
result = s.fillna('foo')
expected = Series([Timestamp('20130101'),
'foo'])
assert_series_equal(result, expected)
# assignment
s2 = s.copy()
s2[1] = 'foo'
assert_series_equal(s2, expected)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
pytest.raises(TypeError, s.fillna, [1, 2])
pytest.raises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(s.values.codes,
np.array([0, 1, -1, 0], dtype=np.int8))
@pytest.mark.parametrize('fill_value, expected_output', [
('a', ['a', 'a', 'b', 'a', 'a']),
({1: 'a', 3: 'b', 4: 'b'}, ['a', 'a', 'b', 'b', 'b']),
({1: 'a'}, ['a', 'a', 'b', np.nan, np.nan]),
({1: 'a', 3: 'b'}, ['a', 'a', 'b', 'b', np.nan]),
(Series('a'), ['a', np.nan, 'b', np.nan, np.nan]),
(Series('a', index=[1]), ['a', 'a', 'b', np.nan, np.nan]),
(Series({1: 'a', 3: 'b'}), ['a', 'a', 'b', 'b', np.nan]),
(Series(['a', 'b'], index=[3, 4]), ['a', np.nan, 'b', 'a', 'b'])
])
def test_fillna_categorical(self, fill_value, expected_output):
# GH 17033
# Test fillna for a Categorical series
data = ['a', np.nan, 'b', np.nan, np.nan]
s = Series(Categorical(data, categories=['a', 'b']))
exp = Series(Categorical(expected_output, categories=['a', 'b']))
tm.assert_series_equal(s.fillna(fill_value), exp)
def test_fillna_categorical_raise(self):
data = ['a', np.nan, 'b', np.nan, np.nan]
s = Series(Categorical(data, categories=['a', 'b']))
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna('d')
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna(Series('d'))
with tm.assert_raises_regex(ValueError,
"fill value must be in categories"):
s.fillna({1: 'd', 3: 'a'})
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar or '
'dict, but you passed a "list"'):
s.fillna(['a', 'b'])
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar or '
'dict, but you passed a "tuple"'):
s.fillna(('a', 'b'))
with tm.assert_raises_regex(TypeError,
'"value" parameter must be a scalar, dict '
'or Series, but you passed a "DataFrame"'):
s.fillna(DataFrame({1: ['a'], 3: ['b']}))
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_na', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
@tm.capture_stdout
def test_isnull_for_inf_deprecated(self):
# gh-17115
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self, datetime_series):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
pytest.raises(ValueError, datetime_series.fillna, value=0,
method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
td1[1] = iNaT
assert isna(td1[1])
assert td1[1].value == iNaT
td1[1] = td[1]
assert not isna(td1[1])
td1[2] = NaT
assert isna(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= datetime_series <= 0.5
# expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
s.dropna(inplace=True)
assert len(s) == 0
# invalid axis
pytest.raises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, Asia/Tokyo]'
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
assert result.dtype == 'datetime64[ns, Asia/Tokyo]'
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
s2.dropna(inplace=True)
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[np.nan, 0, 1, 2],
[np.nan, 1, 2, 3]))
result = s.dropna()
expected = s.iloc[1:]
assert_series_equal(result, expected)
def test_valid(self, datetime_series):
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.dropna()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notna(ts)])
def test_isna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
expected = Series([False, False, False, True, False])
tm.assert_series_equal(ser.isna(), expected)
ser = Series(["hi", "", nan])
expected = Series([False, False, True])
tm.assert_series_equal(ser.isna(), expected)
def test_notna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
expected = Series([True, True, True, False, True])
tm.assert_series_equal(ser.notna(), expected)
ser = Series(["hi", "", nan])
expected = Series([True, True, False])
tm.assert_series_equal(ser.notna(), expected)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
        assert np.isnan(x[0]) and np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
# neither monotonic increasing or decreasing
rng2 = rng[[1, 0, 2]]
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self, datetime_series):
datetime_series[:5] = np.nan
result = datetime_series.dropna()
assert result.name == datetime_series.name
name = datetime_series.name
ts = datetime_series.copy()
ts.dropna(inplace=True)
assert ts.name == name
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
        # TODO: what is this test doing? why are result and expected
# the same call to fillna?
with tm.assert_produces_warning(PerformanceWarning):
# TODO: release-note fillna performance warning
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
with tm.assert_produces_warning(PerformanceWarning):
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
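# Interpolation tests: linear/time/index methods plus the scipy-backed ones
# (spline, pchip, akima, ...), including limit, limit_direction and limit_area.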
class TestSeriesInterpolateData():
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float),
datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in datetime_series.index],
index=datetime_series.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = string_series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize("kwargs", [
{},
pytest.param({'method': 'polynomial', 'order': 1},
marks=td.skip_if_no_scipy)
])
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(**kwargs), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with pytest.raises(ValueError):
s.interpolate(method='time')
@pytest.mark.parametrize("kwargs", [
{},
pytest.param({'method': 'polynomial', 'order': 1},
marks=td.skip_if_no_scipy)
])
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
else:
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with pytest.raises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_unlimited(self):
        # these tests are for issue #16282: default limit=None is unlimited
s = Series([np.nan, 1., 3., np.nan, np.nan, np.nan, 11., np.nan])
expected = Series([1., 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([np.nan, 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([1., 1., 3., 5., 7., 9., 11., np.nan])
result = s.interpolate(method='linear',
limit_direction='backward')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
pytest.raises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
# limit_area introduced GH #16284
def test_interp_limit_area(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([nan, nan, 3, nan, nan, nan, 7, nan, nan])
expected = Series([nan, nan, 3., 4., 5., 6., 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside')
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., 4., nan, nan, 7., nan, nan])
        result = s.interpolate(method='linear', limit_area='inside',
                               limit=1)
        assert_series_equal(result, expected)
expected = Series([nan, nan, 3., 4., nan, 6., 7., nan, nan])
result = s.interpolate(method='linear', limit_area='inside',
limit_direction='both', limit=1)
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., nan, nan, nan, 7., 7., 7.])
result = s.interpolate(method='linear', limit_area='outside')
assert_series_equal(result, expected)
expected = Series([nan, nan, 3., nan, nan, nan, 7., 7., nan])
        result = s.interpolate(method='linear', limit_area='outside',
                               limit=1)
        assert_series_equal(result, expected)
expected = Series([nan, 3., 3., nan, nan, nan, 7., 7., nan])
result = s.interpolate(method='linear', limit_area='outside',
limit_direction='both', limit=1)
assert_series_equal(result, expected)
expected = Series([3., 3., 3., nan, nan, nan, 7., nan, nan])
        result = s.interpolate(method='linear', limit_area='outside',
                               limit_direction='backward')
        assert_series_equal(result, expected)
# raises an error even if limit type is wrong.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_area='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_all_good(self):
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
@pytest.mark.parametrize("check_scipy", [
False,
pytest.param(True, marks=td.skip_if_no_scipy)
])
def test_interp_multiIndex(self, check_scipy):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
if check_scipy:
with pytest.raises(ValueError):
s.interpolate(method='polynomial', order=1)
@td.skip_if_no_scipy
def test_interp_nonmono_raise(self):
s = Series([1, np.nan, 3], index=[0, 2, 1])
with pytest.raises(ValueError):
s.interpolate(method='krogh')
@td.skip_if_no_scipy
def test_interp_datetime64(self):
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize("method", ['polynomial', 'spline'])
def test_no_order(self, method):
s = Series([0, 1, np.nan, 3])
with pytest.raises(ValueError):
s.interpolate(method=method)
@td.skip_if_no_scipy
def test_spline(self):
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
@td.skip_if_no('scipy', min_version='0.15')
def test_spline_extrapolate(self):
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_smooth(self):
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (s.interpolate(method='spline', order=3, s=0)[5] !=
s.interpolate(method='spline', order=3)[5])
@td.skip_if_no_scipy
def test_spline_interpolation(self):
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
@td.skip_if_no_scipy
def test_spline_error(self):
# see gh-10633
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with pytest.raises(ValueError):
s.interpolate(method='spline')
with pytest.raises(ValueError):
s.interpolate(method='spline', order=0)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method='time')
tm.assert_numpy_array_equal(result.values, exp.values)
| bsd-3-clause |
pandegroup/osprey | osprey/strategies.py | 2 | 12612 | from __future__ import print_function, absolute_import, division
import sys
import inspect
import socket
import numpy as np
from sklearn.utils import check_random_state
from sklearn.model_selection import ParameterGrid
try:
from hyperopt import (Trials, tpe, fmin, STATUS_OK, STATUS_RUNNING,
STATUS_FAIL)
except ImportError:
# hyperopt is optional, but required for hyperopt_tpe()
pass
from .search_space import EnumVariable
from .acquisition_functions import AcquisitionFunction
from .surrogate_models import (MaximumLikelihoodGaussianProcess,
GaussianProcessKernel)
DEFAULT_TIMEOUT = socket._GLOBAL_DEFAULT_TIMEOUT
class BaseStrategy(object):
short_name = None
def suggest(self, history, searchspace):
"""
Parameters
----------
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
searchspace : SearchSpace
Instance of search_space.SearchSpace
        random_state : integer or numpy.RandomState, optional
The random seed for sampling. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Returns
-------
new_params : dict
"""
raise NotImplementedError()
@staticmethod
def is_repeated_suggestion(params, history):
"""
Parameters
----------
params : dict
Trial param set
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
Returns
-------
is_repeated_suggestion : bool
"""
if any(params == hparams and hstatus == 'SUCCEEDED'
for hparams, hscore, hstatus in history):
return True
else:
return False
class RandomSearch(BaseStrategy):
short_name = 'random'
def __init__(self, seed=None):
self.seed = seed
def suggest(self, history, searchspace):
"""Randomly suggest params from searchspace.
"""
return searchspace.rvs(self.seed)
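# Example of how a strategy is driven (hypothetical usage; `searchspace` would
# be an osprey search_space.SearchSpace instance and `history` a list of
# (params, score, status) tuples as documented in BaseStrategy.suggest):
#
#     strategy = RandomSearch(seed=42)
#     params = strategy.suggest(history=[], searchspace=searchspace)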
class HyperoptTPE(BaseStrategy):
short_name = 'hyperopt_tpe'
def __init__(self, seed=None, gamma=0.25, seeds=20):
self.seed = seed
self.gamma = gamma
self.seeds = seeds
def suggest(self, history, searchspace):
"""
Suggest params to maximize an objective function based on the
function evaluation history using a tree of Parzen estimators (TPE),
as implemented in the hyperopt package.
Use of this function requires that hyperopt be installed.
"""
# This function is very odd, because as far as I can tell there's
# no real documented API for any of the internals of hyperopt. Its
# execution model is that hyperopt calls your objective function
# (instead of merely providing you with suggested points, and then
        # you calling the function yourself), and it's very tricky (for me)
# to use the internal hyperopt data structures to get these predictions
# out directly.
        # so the path we take in this function is to construct a synthetic
        # hyperopt.Trials database from the `history`, and then call
        # hyperopt.fmin with a dummy objective function that logs the value
# used, and then return that value to our client.
# The form of the hyperopt.Trials database isn't really documented in
# the code -- most of this comes from reverse engineering it, by
# running fmin() on a simple function and then inspecting the form of
# the resulting trials object.
if 'hyperopt' not in sys.modules:
raise ImportError('No module named hyperopt')
random = check_random_state(self.seed)
hp_searchspace = searchspace.to_hyperopt()
trials = Trials()
for i, (params, scores, status) in enumerate(history):
if status == 'SUCCEEDED':
# we're doing maximization, hyperopt.fmin() does minimization,
# so we need to swap the sign
result = {'loss': -np.mean(scores), 'status': STATUS_OK}
elif status == 'PENDING':
result = {'status': STATUS_RUNNING}
elif status == 'FAILED':
result = {'status': STATUS_FAIL}
else:
raise RuntimeError('unrecognized status: %s' % status)
# the vals key in the trials dict is basically just the params
# dict, but enum variables (hyperopt hp.choice() nodes) are
# different, because the index of the parameter is specified
# in vals, not the parameter itself.
vals = {}
for var in searchspace:
if isinstance(var, EnumVariable):
# get the index in the choices of the parameter, and use
# that.
                    # use 'j' so that, on python2, the list comprehension does
                    # not clobber the trial index 'i' used below
                    matches = [
                        j for j, c in enumerate(var.choices)
                        if c == params[var.name]
                    ]
assert len(matches) == 1
vals[var.name] = matches
else:
# the other big difference is that all of the param values
# are wrapped in length-1 lists.
vals[var.name] = [params[var.name]]
trials.insert_trial_doc({
'misc': {
'cmd': ('domain_attachment', 'FMinIter_Domain'),
'idxs': dict((k, [i]) for k in hp_searchspace.keys()),
'tid': i,
'vals': vals,
'workdir': None
},
'result': result,
'tid': i,
# bunch of fixed fields that hyperopt seems to require
'owner': None,
'spec': None,
'state': 2,
'book_time': None,
'exp_key': None,
'refresh_time': None,
'version': 0
})
trials.refresh()
chosen_params_container = []
        def suggest(*args, **kwargs):
            # keyword arguments are placed before **kwargs so this also
            # parses on python2
            return tpe.suggest(*args, gamma=self.gamma,
                               n_startup_jobs=self.seeds, **kwargs)
def mock_fn(x):
# http://stackoverflow.com/a/3190783/1079728
            # to get around no nonlocal keyword in python2
chosen_params_container.append(x)
return 0
fmin(fn=mock_fn,
             algo=suggest,  # the wrapper above, so gamma/n_startup_jobs take effect
space=hp_searchspace,
trials=trials,
max_evals=len(trials.trials) + 1,
**self._hyperopt_fmin_random_kwarg(random))
chosen_params = chosen_params_container[0]
return chosen_params
@staticmethod
def _hyperopt_fmin_random_kwarg(random):
if 'rstate' in inspect.getargspec(fmin).args:
# 0.0.3-dev version uses this argument
kwargs = {'rstate': random, 'allow_trials_fmin': False}
elif 'rseed' in inspect.getargspec(fmin).args:
# 0.0.2 version uses different argument
kwargs = {'rseed': random.randint(2**32 - 1)}
return kwargs
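# --- Editor's illustrative sketch (not part of the original osprey code) ---
# HyperoptTPE rebuilds the hyperopt Trials object from the plain history on
# every call, so driving it looks like any other strategy. The parameter
# names below are hypothetical and must match the variables defined in
# `searchspace`; hyperopt has to be installed and imported for suggest() to
# work.
def _demo_hyperopt_tpe(searchspace):
    history = [
        ({'C': 0.1, 'kernel': 'rbf'}, [0.71, 0.69], 'SUCCEEDED'),
        ({'C': 10.0, 'kernel': 'linear'}, [0.80, 0.78], 'SUCCEEDED'),
    ]
    strategy = HyperoptTPE(seed=0, gamma=0.25, seeds=2)
    return strategy.suggest(history, searchspace)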
class Bayes(BaseStrategy):
short_name = 'bayes'
def __init__(self,
acquisition=None,
surrogate=None,
kernels=None,
seed=None,
seeds=1,
max_feval=5E4,
max_iter=1E5,
n_iter=50):
self.seed = seed
self.seeds = seeds
self.max_feval = max_feval
self.max_iter = max_iter
self.n_iter = n_iter
self.n_dims = None
if surrogate is None:
surrogate = 'gp'
self.surrogate = surrogate
if kernels is None:
kernels = [{
'name': 'GPy.kern.Matern52',
'params': {
'ARD': True
},
'options': {
'independent': False
}
}]
self.kernel_params = kernels
if acquisition is None:
acquisition = {'name': 'osprey', 'params': {}}
self.acquisition_params = acquisition
def _get_data(self, history, searchspace):
X = []
Y = []
V = []
ignore = []
for param_dict, scores, status in history:
            # transform points into the GP domain. This involves bringing
# int and enum variables to floating point, etc.
if status == 'FAILED':
# not sure how to deal with these yet
continue
point = searchspace.point_to_unit(param_dict)
if status == 'SUCCEEDED':
X.append(point)
Y.append(np.mean(scores))
V.append(np.var(scores))
elif status == 'PENDING':
ignore.append(point)
else:
raise RuntimeError('unrecognized status: %s' % status)
return (np.array(X).reshape(-1, self.n_dims),
np.array(Y).reshape(-1, 1), np.array(V).reshape(-1, 1),
np.array(ignore).reshape(-1, self.n_dims))
def _from_unit(self, result, searchspace):
# Note that GP only deals with float-valued variables, so we have
# a transform step on either side, where int and enum valued variables
# are transformed before calling gp, and then the result suggested by
# GP needs to be reverse-transformed.
out = {}
for gpvalue, var in zip(result, searchspace):
out[var.name] = var.point_from_unit(float(gpvalue))
return out
    def _is_within(self, point, X, tol=1E-2):
        # Euclidean distance from `point` to each previously evaluated row of
        # X; the sum has to run over the feature axis.
        distances = np.sqrt(((point - X) ** 2).sum(axis=1))
        return bool(np.any(distances <= tol))
def suggest(self, history, searchspace, max_tries=5):
if len(history) < self.seeds:
return RandomSearch().suggest(history, searchspace)
self.n_dims = searchspace.n_dims
X, Y, V, ignore = self._get_data(history, searchspace)
# TODO make _create_kernel accept optional args.
# Define and fit model
if self.surrogate == 'gp':
kernel = GaussianProcessKernel(self.kernel_params, self.n_dims)
model = MaximumLikelihoodGaussianProcess(X=X,
Y=Y,
kernel=kernel.kernel,
max_feval=self.max_feval)
else:
raise NotImplementedError(
'Surrogate model not recognised. Please choose from: gp')
model.fit()
# Define acquisition function and get best candidate
af = AcquisitionFunction(surrogate=model,
acquisition_params=self.acquisition_params,
n_dims=self.n_dims,
n_iter=self.n_iter,
max_iter=self.max_iter)
suggestion = af.get_best_candidate()
if suggestion in ignore or self._is_within(suggestion, X):
return RandomSearch().suggest(history, searchspace)
return self._from_unit(suggestion, searchspace)
class GridSearch(BaseStrategy):
short_name = 'grid'
def __init__(self):
self.param_grid = None
self.current = -1
def suggest(self, history, searchspace):
# Convert searchspace to param_grid
if self.param_grid is None:
if not all(isinstance(v, EnumVariable) for v in searchspace):
raise RuntimeError(
"GridSearchStrategy is defined only for all-enum search space"
)
self.param_grid = ParameterGrid(
dict((v.name, v.choices) for v in searchspace))
        # NOTE: there is no way of signalling that the grid has been exhausted,
        # so the user should choose the number of evaluations accordingly
self.current += 1
return self.param_grid[self.current % len(self.param_grid)]
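# --- Editor's illustrative sketch (not part of the original osprey code) ---
# GridSearch walks the cartesian product of the enum choices and wraps around
# once it is exhausted (self.current % len(self.param_grid)). The same
# indexing can be reproduced with scikit-learn's ParameterGrid directly; the
# import path below is the modern sklearn.model_selection location (older
# releases expose it as sklearn.grid_search).
def _demo_grid_indexing():
    from sklearn.model_selection import ParameterGrid
    grid = ParameterGrid({'kernel': ['rbf', 'linear'], 'C': [1, 10]})
    # four combinations; the fifth request wraps back to the first one
    return [grid[i % len(grid)] for i in range(5)]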
| apache-2.0 |
rhoscanner-team/pcd-plotter | delaunay_example.py | 1 | 1435 | import numpy as np
from scipy.spatial import Delaunay
points = np.random.rand(30, 2) # 30 points in 2-d
tri = Delaunay(points)
# Make a list of line segments:
# edge_points = [ ((x1_1, y1_1), (x2_1, y2_1)),
# ((x1_2, y1_2), (x2_2, y2_2)),
# ... ]
edge_points = []
edges = set()
def add_edge(i, j):
"""Add a line between the i-th and j-th points, if not in the list already"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add( (i, j) )
edge_points.append(points[ [i, j] ])
# loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
for ia, ib, ic in tri.vertices:
add_edge(ia, ib)
add_edge(ib, ic)
add_edge(ic, ia)
# plot it: the LineCollection is just a (maybe) faster way to plot lots of
# lines at once
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
lines = LineCollection(edge_points)
plt.figure()
plt.title('Delaunay triangulation')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o')  # 'hold' kwarg dropped; it is no longer needed or supported
plt.xlim(-1, 2)
plt.ylim(-1, 2)
# -- the same stuff for the convex hull
edges = set()
edge_points = []
for ia, ib in tri.convex_hull:
add_edge(ia, ib)
lines = LineCollection(edge_points)
plt.figure()
plt.title('Convex hull')
plt.gca().add_collection(lines)
plt.plot(points[:,0], points[:,1], 'o')  # 'hold' kwarg dropped; it is no longer needed or supported
plt.xlim(-1, 2)
plt.ylim(-1, 2)
plt.show()
| gpl-2.0 |
remenska/rootpy | rootpy/plotting/contrib/plot_corrcoef_matrix.py | 5 | 12192 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from ...extern.six.moves import range
from ...extern.six import string_types
__all__ = [
'plot_corrcoef_matrix',
'corrcoef',
'cov',
]
def plot_corrcoef_matrix(matrix, names=None,
cmap=None, cmap_text=None,
fontsize=12, grid=False,
axes=None):
"""
This function will draw a lower-triangular correlation matrix
Parameters
----------
matrix : 2-dimensional numpy array/matrix
A correlation coefficient matrix
names : list of strings, optional (default=None)
List of the parameter names corresponding to the rows in ``matrix``.
cmap : matplotlib color map, optional (default=None)
Color map used to color the matrix cells.
cmap_text : matplotlib color map, optional (default=None)
Color map used to color the cell value text. If None, then
all values will be black.
fontsize : int, optional (default=12)
Font size of parameter name and correlation value text.
grid : bool, optional (default=False)
If True, then draw dashed grid lines around the matrix elements.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
Notes
-----
NumPy and matplotlib are required
Examples
--------
>>> matrix = corrcoef(data.T, weights=weights)
>>> plot_corrcoef_matrix(matrix, names)
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
if axes is None:
axes = plt.gca()
matrix = np.asarray(matrix)
if matrix.ndim != 2:
raise ValueError("matrix is not a 2-dimensional array or matrix")
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("matrix is not square")
if names is not None and len(names) != matrix.shape[0]:
raise ValueError("the number of names does not match the number of "
"rows/columns in the matrix")
# mask out the upper triangular matrix
matrix[np.triu_indices(matrix.shape[0])] = np.nan
if isinstance(cmap_text, string_types):
cmap_text = cm.get_cmap(cmap_text, 201)
if cmap is None:
cmap = cm.get_cmap('jet', 201)
elif isinstance(cmap, string_types):
cmap = cm.get_cmap(cmap, 201)
# make NaN pixels white
cmap.set_bad('w')
axes.imshow(matrix, interpolation='nearest',
cmap=cmap, origin='upper',
vmin=-1, vmax=1)
axes.set_frame_on(False)
plt.setp(axes.get_yticklabels(), visible=False)
plt.setp(axes.get_yticklines(), visible=False)
plt.setp(axes.get_xticklabels(), visible=False)
plt.setp(axes.get_xticklines(), visible=False)
if grid:
# draw grid lines
for slot in range(1, matrix.shape[0] - 1):
# vertical
axes.plot((slot - 0.5, slot - 0.5),
(slot - 0.5, matrix.shape[0] - 0.5), 'k:', linewidth=1)
# horizontal
axes.plot((-0.5, slot + 0.5),
(slot + 0.5, slot + 0.5), 'k:', linewidth=1)
if names is not None:
for slot in range(1, matrix.shape[0]):
# diagonal
axes.plot((slot - 0.5, slot + 1.5),
(slot - 0.5, slot - 2.5), 'k:', linewidth=1)
# label cell values
for row, col in zip(*np.tril_indices(matrix.shape[0], k=-1)):
value = matrix[row][col]
if cmap_text is not None:
color = cmap_text((value + 1.) / 2.)
else:
color = 'black'
axes.text(
col, row,
"{0:d}%".format(int(value * 100)),
color=color,
ha='center', va='center',
fontsize=fontsize)
if names is not None:
# write parameter names
for i, name in enumerate(names):
axes.annotate(
name, (i, i),
rotation=45,
ha='left', va='bottom',
transform=axes.transData,
fontsize=fontsize)
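# --- Editor's illustrative usage sketch ---
# Mirrors the docstring example above: build a weighted correlation matrix
# with corrcoef() (defined later in this module) and draw it. The data and
# weights are randomly generated placeholders; nothing runs on import.
def _demo_plot_corrcoef_matrix():
    import numpy as np
    from matplotlib import pyplot as plt
    rng = np.random.RandomState(0)
    data = rng.normal(size=(1000, 4))            # 1000 observations of 4 parameters
    weights = rng.uniform(0.5, 1.5, size=1000)   # one weight per observation
    matrix = corrcoef(data.T, weights=weights)
    plot_corrcoef_matrix(matrix, names=['a', 'b', 'c', 'd'])
    plt.show()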
def cov(m, y=None, rowvar=1, bias=0, ddof=None, weights=None, repeat_weights=0):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
weights : array-like, optional
A 1-D array of weights with a length equal to the number of
observations.
repeat_weights : int, optional
The default treatment of weights in the weighted covariance is to first
normalize them to unit sum and use the biased weighted covariance
equation. If `repeat_weights` is 1 then the weights must represent an
integer number of occurrences of each observation and both a biased and
unbiased weighted covariance is defined because the total sample size
can be determined.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
import numpy as np
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
X = np.array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None), np.newaxis)
else:
axis = 1
tup = (np.newaxis, slice(None))
if y is not None:
y = np.array(y, copy=False, ndmin=2, dtype=float)
X = np.concatenate((X, y), axis)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
if weights is not None:
weights = np.array(weights, dtype=float)
weights_sum = weights.sum()
if weights_sum <= 0:
raise ValueError(
"sum of weights is non-positive")
X -= np.average(X, axis=1-axis, weights=weights)[tup]
if repeat_weights:
# each weight represents a number of repetitions of an observation
# the total sample size can be determined in this case and we have
# both an unbiased and biased weighted covariance
fact = weights_sum - ddof
else:
# normalize weights so they sum to unity
weights /= weights_sum
# unbiased weighted covariance is not defined if the weights are
# not integral frequencies (repeat-type)
fact = (1. - np.power(weights, 2).sum())
else:
weights = 1
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
fact = float(N - ddof)
if not rowvar:
return (np.dot(weights * X.T, X.conj()) / fact).squeeze()
else:
return (np.dot(weights * X, X.T.conj()) / fact).squeeze()
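# --- Editor's illustrative sketch ---
# Demonstrates the repeat_weights semantics documented above: integer weights
# with repeat_weights=1 are equivalent to physically repeating observations.
def _demo_weighted_cov():
    import numpy as np
    x = np.array([[0., 1., 2.], [2., 1., 0.]])
    w = np.array([1., 2., 1.])                                # middle observation counted twice
    repeated = np.array([[0., 1., 1., 2.], [2., 1., 1., 0.]])
    return np.allclose(cov(x, weights=w, repeat_weights=1), cov(repeated))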
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None, weights=None,
repeat_weights=0):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
weights : array-like, optional
A 1-D array of weights with a length equal to the number of
observations.
repeat_weights : int, optional
The default treatment of weights in the weighted covariance is to first
normalize them to unit sum and use the biased weighted covariance
equation. If `repeat_weights` is 1 then the weights must represent an
integer number of occurrences of each observation and both a biased and
unbiased weighted covariance is defined because the total sample size
can be determined.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
import numpy as np
c = cov(x, y, rowvar, bias, ddof, weights, repeat_weights)
if c.size == 0:
# handle empty arrays
return c
try:
d = np.diag(c)
except ValueError: # scalar covariance
return 1
return c / np.sqrt(np.multiply.outer(d, d))
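# --- Editor's illustrative sketch ---
# The correlation matrix always has a unit diagonal; for the perfectly
# anti-correlated pair used in the cov() docstring the off-diagonal entries
# are -1, with or without observation weights.
def _demo_corrcoef():
    import numpy as np
    x = np.array([[0., 1., 2.], [2., 1., 0.]])
    return corrcoef(x), corrcoef(x, weights=np.array([0.2, 0.5, 0.3]))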
| gpl-3.0 |
nathanshartmann/portuguese_word_embeddings | sentence_similarity.py | 1 | 3369 |
"""
This script evaluates a embedding model in a semantic similarity perspective.
It uses the dataset of ASSIN sentence similarity shared task and the method
of Hartmann which achieved the best results in the competition.
ASSIN shared-task website:
http://propor2016.di.fc.ul.pt/?page_id=381
Paper of Hartmann can be found at:
http://www.linguamatica.com/index.php/linguamatica/article/download/v8n2-6/365
"""
from sklearn.linear_model import LinearRegression
from sentence_similarity.utils.assin_eval import read_xml, eval_similarity
from gensim.models import KeyedVectors
from xml.dom import minidom
from numpy import array
from os import path
import pickle
import argparse
DATA_DIR = 'sentence_similarity/data/'
TEST_DIR = path.join(DATA_DIR, 'assin-test-gold/')
def gensim_embedding_difference(data, field1, field2):
"""Calculate the similarity between the sum of all embeddings."""
distances = []
for pair in data:
e1 = [i if i in embeddings else 'unk' for i in pair[field1]]
e2 = [i if i in embeddings else 'unk' for i in pair[field2]]
distances.append([embeddings.n_similarity(e1, e2)])
return distances
def evaluate_testset(x, y, test):
"""Docstring."""
l_reg = LinearRegression()
l_reg.fit(x, y)
test_predict = l_reg.predict(test)
return test_predict
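# --- Editor's illustrative sketch (toy numbers, not real ASSIN data) ---
# evaluate_testset() is simply "fit a one-feature linear regression, then
# predict": each feature row is the single n_similarity value produced by
# gensim_embedding_difference() for one sentence pair.
def _demo_evaluate_testset():
    train_features = [[0.10], [0.55], [0.90]]
    train_scores = array([1.0, 3.0, 5.0])   # gold similarity scores (1-5 scale)
    test_features = [[0.40], [0.75]]
    return evaluate_testset(train_features, train_scores, test_features)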
def write_xml(filename, pred):
"""Docstring."""
with open(filename) as fp:
xml = minidom.parse(fp)
pairs = xml.getElementsByTagName('pair')
for pair in pairs:
pair.setAttribute('similarity', str(pred[pairs.index(pair)]))
with open(filename, 'w') as fp:
fp.write(xml.toxml())
if __name__ == '__main__':
# Parser descriptors
parser = argparse.ArgumentParser(
description='''Sentence similarity evaluation for word embeddings in
brazilian and european variants of Portuguese language. It is expected
a word embedding model in text format.''')
parser.add_argument('embedding',
type=str,
help='embedding model')
parser.add_argument('lang',
choices=['br', 'eu'],
help='{br, eu} choose PT-BR or PT-EU testset')
args = parser.parse_args()
lang = args.lang
emb = args.embedding
# Loading embedding model
embeddings = KeyedVectors.load_word2vec_format(emb,
binary=False,
unicode_errors="ignore")
# Loading evaluation data and parsing it
with open('%sassin-pt%s-train.pkl' % (DATA_DIR, lang), 'rb') as fp:
data = pickle.load(fp)
with open('%sassin-pt%s-test-gold.pkl' % (DATA_DIR, lang), 'rb') as fp:
test = pickle.load(fp)
# Getting features
features = gensim_embedding_difference(data, 'tokens_t1', 'tokens_t2')
features_test = gensim_embedding_difference(test, 'tokens_t1', 'tokens_t2')
# Predicting similarities
results = array([float(i['result']) for i in data])
results_test = evaluate_testset(features, results, features_test)
write_xml('%soutput.xml' % DATA_DIR, results_test)
# Evaluating
pairs_gold = read_xml('%sassin-pt%s-test.xml' % (TEST_DIR, lang), True)
pairs_sys = read_xml('%soutput.xml' % DATA_DIR, True)
eval_similarity(pairs_gold, pairs_sys)
| gpl-3.0 |
hetajen/vnpy161 | vn.trader/ctaStrategy/ctaBacktesting.py | 1 | 40890 | # encoding: UTF-8
'''
This module contains the backtesting engine of the CTA module. The engine's
API is identical to that of the live CTA engine, so the same strategy code
can be backtested and traded live without modification.
History
<id>            <author>          <description>
2017051200      hetajen           Example: strategy backtesting and optimization
'''
from __future__ import division
'''2017051200 Add by hetajen begin'''
import time
'''2017051200 Add by hetajen end'''
from datetime import datetime, timedelta
from collections import OrderedDict
from itertools import product
import multiprocessing
import pymongo
from ctaBase import *
from vtConstant import *
from vtGateway import VtOrderData, VtTradeData
from vtFunction import loadMongoSetting
########################################################################
class BacktestingEngine(object):
"""
    CTA backtesting engine.
    Its function interface is identical to the live strategy engine, so the
    same strategy code can be used for both backtesting and live trading.
"""
TICK_MODE = 'tick'
BAR_MODE = 'bar'
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        # counter for locally numbered stop orders
        self.stopOrderCount = 0
        # stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
        # local stop-order dictionaries
        # key is stopOrderID, value is the stopOrder object
        self.stopOrderDict = {}             # cancelled stop orders are kept in this dict
        self.workingStopOrderDict = {}      # cancelled stop orders are removed from this dict
        # engine type is backtesting
        self.engineType = ENGINETYPE_BACKTESTING
        # backtesting settings
        self.strategy = None        # strategy being backtested
        self.mode = self.BAR_MODE   # backtesting mode, bar data by default
        self.startDate = ''
        self.initDays = 0
        self.endDate = ''
        self.slippage = 0           # slippage assumed for the backtest
        self.rate = 0               # commission rate assumed for the backtest (percentage-based)
        self.size = 1               # contract size (multiplier), 1 by default
        self.priceTick = 0          # minimum price tick
        self.dbClient = None        # database client
        self.dbCursor = None        # database cursor
        #self.historyData = []       # list of historical data used for backtesting
        self.initData = []          # data used for strategy initialization
        #self.backtestingData = []   # data used for backtesting
        self.dbName = ''            # name of the backtesting database
        self.symbol = ''            # name of the backtesting collection
        self.dataStartDate = None       # start date of the backtest data, datetime object
        self.dataEndDate = None         # end date of the backtest data, datetime object
        self.strategyStartDate = None   # strategy start date (earlier data is used for initialization), datetime object
        self.limitOrderDict = OrderedDict()         # limit-order dictionary
        self.workingLimitOrderDict = OrderedDict()  # working limit orders, used for order matching
        self.limitOrderCount = 0                    # limit-order counter
        self.tradeCount = 0             # trade counter
        self.tradeDict = OrderedDict()  # trade dictionary
        self.logList = []               # log records
        # latest market data, used to simulate fills
        self.tick = None
        self.bar = None
        self.dt = None      # latest timestamp
#----------------------------------------------------------------------
def setStartDate(self, startDate='20100416', initDays=10):
"""设置回测的启动日期"""
self.startDate = startDate
self.initDays = initDays
self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
initTimeDelta = timedelta(initDays)
self.strategyStartDate = self.dataStartDate + initTimeDelta
#----------------------------------------------------------------------
def setEndDate(self, endDate=''):
"""设置回测的结束日期"""
self.endDate = endDate
if endDate:
            self.dataEndDate = datetime.strptime(endDate, '%Y%m%d')
            # adjust the time, otherwise bars on dataEndDate itself would be excluded
            # (datetime.replace returns a new object, so the result must be assigned)
            self.dataEndDate = self.dataEndDate.replace(hour=23, minute=59)
#----------------------------------------------------------------------
def setBacktestingMode(self, mode):
"""设置回测模式"""
self.mode = mode
#----------------------------------------------------------------------
def setDatabase(self, dbName, symbol):
"""设置历史数据所用的数据库"""
self.dbName = dbName
self.symbol = symbol
#----------------------------------------------------------------------
def loadHistoryData(self):
"""载入历史数据"""
host, port, logging = loadMongoSetting()
self.dbClient = pymongo.MongoClient(host, port)
collection = self.dbClient[self.dbName][self.symbol]
self.output(u'开始载入数据')
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
# 载入初始化需要用的数据
flt = {'datetime':{'$gte':self.dataStartDate,
'$lt':self.strategyStartDate}}
initCursor = collection.find(flt)
# 将数据从查询指针中读取出,并生成列表
self.initData = [] # 清空initData列表
for d in initCursor:
data = dataClass()
data.__dict__ = d
self.initData.append(data)
# 载入回测数据
if not self.dataEndDate:
flt = {'datetime':{'$gte':self.strategyStartDate}} # 数据过滤条件
else:
flt = {'datetime':{'$gte':self.strategyStartDate,
'$lte':self.dataEndDate}}
self.dbCursor = collection.find(flt)
self.output(u'载入完成,数据量:%s' %(initCursor.count() + self.dbCursor.count()))
#----------------------------------------------------------------------
def runBacktesting(self):
"""运行回测"""
# 载入历史数据
self.loadHistoryData()
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
self.output(u'开始回测')
self.strategy.inited = True
self.strategy.onInit()
self.output(u'策略初始化完成')
self.strategy.trading = True
self.strategy.onStart()
self.output(u'策略启动完成')
self.output(u'开始回放数据')
for d in self.dbCursor:
data = dataClass()
data.__dict__ = d
func(data)
self.output(u'数据回放结束')
#----------------------------------------------------------------------
def newBar(self, bar):
"""新的K线"""
self.bar = bar
self.dt = bar.datetime
self.crossLimitOrder() # 先撮合限价单
self.crossStopOrder() # 再撮合停止单
self.strategy.onBar(bar) # 推送K线到策略中
#----------------------------------------------------------------------
def newTick(self, tick):
"""新的Tick"""
self.tick = tick
self.dt = tick.datetime
self.crossLimitOrder()
self.crossStopOrder()
self.strategy.onTick(tick)
#----------------------------------------------------------------------
def initStrategy(self, strategyClass, setting=None):
"""
初始化策略
setting是策略的参数设置,如果使用类中写好的默认设置则可以不传该参数
"""
self.strategy = strategyClass(self, setting)
self.strategy.name = self.strategy.className
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发单"""
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
order = VtOrderData()
order.vtSymbol = vtSymbol
order.price = self.roundToPriceTick(price)
order.totalVolume = volume
order.status = STATUS_NOTTRADED # 刚提交尚未成交
order.orderID = orderID
order.vtOrderID = orderID
order.orderTime = str(self.dt)
# CTA委托类型映射
if orderType == CTAORDER_BUY:
order.direction = DIRECTION_LONG
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
order.direction = DIRECTION_LONG
order.offset = OFFSET_CLOSE
# 保存到限价单字典中
self.workingLimitOrderDict[orderID] = order
self.limitOrderDict[orderID] = order
return orderID
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
if vtOrderID in self.workingLimitOrderDict:
order = self.workingLimitOrderDict[vtOrderID]
order.status = STATUS_CANCELLED
order.cancelTime = str(self.dt)
del self.workingLimitOrderDict[vtOrderID]
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.price = self.roundToPriceTick(price)
so.volume = volume
so.strategy = strategy
so.stopOrderID = stopOrderID
so.status = STOPORDER_WAITING
if orderType == CTAORDER_BUY:
so.direction = DIRECTION_LONG
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = DIRECTION_LONG
so.offset = OFFSET_CLOSE
# 保存stopOrder对象到字典中
self.stopOrderDict[stopOrderID] = so
self.workingStopOrderDict[stopOrderID] = so
return stopOrderID
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单"""
# 检查停止单是否存在
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
so.status = STOPORDER_CANCELLED
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def crossLimitOrder(self):
"""基于最新数据撮合限价单"""
# 先确定会撮合成交的价格
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.low # 若买入方向限价单价格高于该价格,则会成交
sellCrossPrice = self.bar.high # 若卖出方向限价单价格低于该价格,则会成交
buyBestCrossPrice = self.bar.open # 在当前时间点前发出的买入委托可能的最优成交价
sellBestCrossPrice = self.bar.open # 在当前时间点前发出的卖出委托可能的最优成交价
else:
buyCrossPrice = self.tick.askPrice1
sellCrossPrice = self.tick.bidPrice1
buyBestCrossPrice = self.tick.askPrice1
sellBestCrossPrice = self.tick.bidPrice1
# 遍历限价单字典中的所有限价单
for orderID, order in self.workingLimitOrderDict.items():
# 判断是否会成交
buyCross = (order.direction==DIRECTION_LONG and
order.price>=buyCrossPrice and
buyCrossPrice > 0) # 国内的tick行情在涨停时askPrice1为0,此时买无法成交
sellCross = (order.direction==DIRECTION_SHORT and
order.price<=sellCrossPrice and
sellCrossPrice > 0) # 国内的tick行情在跌停时bidPrice1为0,此时卖无法成交
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = order.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
trade.orderID = order.orderID
trade.vtOrderID = order.orderID
trade.direction = order.direction
trade.offset = order.offset
                # Take a buy order as an example:
                # 1. Suppose the current bar's OHLC is 100, 125, 90, 110
                # 2. Suppose the strategy placed a limit order at 105 at the close of the
                #    previous bar (i.e. the open of the current bar)
                # 3. The actual fill price is then 100 rather than 105, because the best
                #    market price when the order was placed was 100
if buyCross:
trade.price = min(order.price, buyBestCrossPrice)
self.strategy.pos += order.totalVolume
else:
trade.price = max(order.price, sellBestCrossPrice)
self.strategy.pos -= order.totalVolume
trade.volume = order.totalVolume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 推送委托数据
order.tradedVolume = order.totalVolume
order.status = STATUS_ALLTRADED
self.strategy.onOrder(order)
# 从字典中删除该限价单
del self.workingLimitOrderDict[orderID]
#----------------------------------------------------------------------
def crossStopOrder(self):
"""基于最新数据撮合停止单"""
# 先确定会撮合成交的价格,这里和限价单规则相反
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.high # 若买入方向停止单价格低于该价格,则会成交
sellCrossPrice = self.bar.low # 若卖出方向限价单价格高于该价格,则会成交
bestCrossPrice = self.bar.open # 最优成交价,买入停止单不能低于,卖出停止单不能高于
else:
buyCrossPrice = self.tick.lastPrice
sellCrossPrice = self.tick.lastPrice
bestCrossPrice = self.tick.lastPrice
# 遍历停止单字典中的所有停止单
for stopOrderID, so in self.workingStopOrderDict.items():
# 判断是否会成交
buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice
sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = so.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
if buyCross:
self.strategy.pos += so.volume
trade.price = max(bestCrossPrice, so.price)
else:
self.strategy.pos -= so.volume
trade.price = min(bestCrossPrice, so.price)
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
trade.orderID = orderID
trade.vtOrderID = orderID
trade.direction = so.direction
trade.offset = so.offset
trade.volume = so.volume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
# 推送委托数据
so.status = STOPORDER_TRIGGERED
order = VtOrderData()
order.vtSymbol = so.vtSymbol
order.symbol = so.vtSymbol
order.orderID = orderID
order.vtOrderID = orderID
order.direction = so.direction
order.offset = so.offset
order.price = so.price
order.totalVolume = so.volume
order.tradedVolume = so.volume
order.status = STATUS_ALLTRADED
order.orderTime = trade.tradeTime
self.strategy.onOrder(order)
self.limitOrderDict[orderID] = order
# 从字典中删除该限价单
if stopOrderID in self.workingStopOrderDict:
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""考虑到回测中不允许向数据库插入数据,防止实盘交易中的一些代码出错"""
pass
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Bar"""
return self.initData
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Tick"""
return self.initData
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录日志"""
log = str(self.dt) + ' ' + content
self.logList.append(log)
#----------------------------------------------------------------------
def output(self, content):
"""输出内容"""
print str(datetime.now()) + "\t" + content
#----------------------------------------------------------------------
def calculateBacktestingResult(self):
"""
计算回测结果
"""
self.output(u'计算回测结果')
# 首先基于回测后的成交记录,计算每笔交易的盈亏
resultList = [] # 交易结果列表
longTrade = [] # 未平仓的多头交易
shortTrade = [] # 未平仓的空头交易
tradeTimeList = [] # 每笔成交时间戳
posList = [0] # 每笔成交后的持仓情况
for trade in self.tradeDict.values():
# 多头交易
if trade.direction == DIRECTION_LONG:
# 如果尚无空头交易
if not shortTrade:
longTrade.append(trade)
# 当前多头交易为平空
else:
while True:
entryTrade = shortTrade[0]
exitTrade = trade
# 清算开平仓交易
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
-closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([-1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# 计算未清算部分
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# 如果开仓交易已经全部清算,则从列表中移除
if not entryTrade.volume:
shortTrade.pop(0)
# 如果平仓交易已经全部清算,则退出循环
if not exitTrade.volume:
break
# 如果平仓交易未全部清算,
if exitTrade.volume:
# 且开仓交易已经全部清算完,则平仓交易剩余的部分
# 等于新的反向开仓交易,添加到队列中
if not shortTrade:
longTrade.append(exitTrade)
break
# 如果开仓交易还有剩余,则进入下一轮循环
else:
pass
# 空头交易
else:
# 如果尚无多头交易
if not longTrade:
shortTrade.append(trade)
# 当前空头交易为平多
else:
while True:
entryTrade = longTrade[0]
exitTrade = trade
# 清算开平仓交易
closedVolume = min(exitTrade.volume, entryTrade.volume)
result = TradingResult(entryTrade.price, entryTrade.dt,
exitTrade.price, exitTrade.dt,
closedVolume, self.rate, self.slippage, self.size)
resultList.append(result)
posList.extend([1,0])
tradeTimeList.extend([result.entryDt, result.exitDt])
# 计算未清算部分
entryTrade.volume -= closedVolume
exitTrade.volume -= closedVolume
# 如果开仓交易已经全部清算,则从列表中移除
if not entryTrade.volume:
longTrade.pop(0)
# 如果平仓交易已经全部清算,则退出循环
if not exitTrade.volume:
break
# 如果平仓交易未全部清算,
if exitTrade.volume:
# 且开仓交易已经全部清算完,则平仓交易剩余的部分
# 等于新的反向开仓交易,添加到队列中
if not longTrade:
shortTrade.append(exitTrade)
break
# 如果开仓交易还有剩余,则进入下一轮循环
else:
pass
# 检查是否有交易
if not resultList:
self.output(u'无交易结果')
return {}
# 然后基于每笔交易的结果,我们可以计算具体的盈亏曲线和最大回撤等
capital = 0 # 资金
maxCapital = 0 # 资金最高净值
drawdown = 0 # 回撤
totalResult = 0 # 总成交数量
totalTurnover = 0 # 总成交金额(合约面值)
totalCommission = 0 # 总手续费
totalSlippage = 0 # 总滑点
timeList = [] # 时间序列
pnlList = [] # 每笔盈亏序列
capitalList = [] # 盈亏汇总的时间序列
drawdownList = [] # 回撤的时间序列
winningResult = 0 # 盈利次数
losingResult = 0 # 亏损次数
totalWinning = 0 # 总盈利金额
totalLosing = 0 # 总亏损金额
for result in resultList:
capital += result.pnl
maxCapital = max(capital, maxCapital)
drawdown = capital - maxCapital
pnlList.append(result.pnl)
timeList.append(result.exitDt) # 交易的时间戳使用平仓时间
capitalList.append(capital)
drawdownList.append(drawdown)
totalResult += 1
totalTurnover += result.turnover
totalCommission += result.commission
totalSlippage += result.slippage
if result.pnl >= 0:
winningResult += 1
totalWinning += result.pnl
else:
losingResult += 1
totalLosing += result.pnl
# 计算盈亏相关数据
winningRate = winningResult/totalResult*100 # 胜率
averageWinning = 0 # 这里把数据都初始化为0
averageLosing = 0
profitLossRatio = 0
if winningResult:
averageWinning = totalWinning/winningResult # 平均每笔盈利
if losingResult:
averageLosing = totalLosing/losingResult # 平均每笔亏损
if averageLosing:
profitLossRatio = -averageWinning/averageLosing # 盈亏比
# 返回回测结果
d = {}
d['capital'] = capital
d['maxCapital'] = maxCapital
d['drawdown'] = drawdown
d['totalResult'] = totalResult
d['totalTurnover'] = totalTurnover
d['totalCommission'] = totalCommission
d['totalSlippage'] = totalSlippage
d['timeList'] = timeList
d['pnlList'] = pnlList
d['capitalList'] = capitalList
d['drawdownList'] = drawdownList
d['winningRate'] = winningRate
d['averageWinning'] = averageWinning
d['averageLosing'] = averageLosing
d['profitLossRatio'] = profitLossRatio
d['posList'] = posList
d['tradeTimeList'] = tradeTimeList
return d
#----------------------------------------------------------------------
def showBacktestingResult(self):
"""显示回测结果"""
d = self.calculateBacktestingResult()
# 输出
self.output('-' * 30)
self.output(u'第一笔交易:\t%s' % d['timeList'][0])
self.output(u'最后一笔交易:\t%s' % d['timeList'][-1])
self.output(u'总交易次数:\t%s' % formatNumber(d['totalResult']))
self.output(u'总盈亏:\t%s' % formatNumber(d['capital']))
self.output(u'最大回撤: \t%s' % formatNumber(min(d['drawdownList'])))
self.output(u'平均每笔盈利:\t%s' %formatNumber(d['capital']/d['totalResult']))
self.output(u'平均每笔滑点:\t%s' %formatNumber(d['totalSlippage']/d['totalResult']))
self.output(u'平均每笔佣金:\t%s' %formatNumber(d['totalCommission']/d['totalResult']))
self.output(u'胜率\t\t%s%%' %formatNumber(d['winningRate']))
self.output(u'盈利交易平均值\t%s' %formatNumber(d['averageWinning']))
self.output(u'亏损交易平均值\t%s' %formatNumber(d['averageLosing']))
self.output(u'盈亏比:\t%s' %formatNumber(d['profitLossRatio']))
# 绘图
import matplotlib.pyplot as plt
import numpy as np
try:
import seaborn as sns # 如果安装了seaborn则设置为白色风格
sns.set_style('whitegrid')
except ImportError:
pass
pCapital = plt.subplot(4, 1, 1)
pCapital.set_ylabel("capital")
pCapital.plot(d['capitalList'], color='r', lw=0.8)
pDD = plt.subplot(4, 1, 2)
pDD.set_ylabel("DD")
pDD.bar(range(len(d['drawdownList'])), d['drawdownList'], color='g')
pPnl = plt.subplot(4, 1, 3)
pPnl.set_ylabel("pnl")
pPnl.hist(d['pnlList'], bins=50, color='c')
pPos = plt.subplot(4, 1, 4)
pPos.set_ylabel("Position")
if d['posList'][-1] == 0:
del d['posList'][-1]
tradeTimeIndex = [item.strftime("%m/%d %H:%M:%S") for item in d['tradeTimeList']]
xindex = np.arange(0, len(tradeTimeIndex), np.int(len(tradeTimeIndex)/10))
tradeTimeIndex = map(lambda i: tradeTimeIndex[i], xindex)
pPos.plot(d['posList'], color='k', drawstyle='steps-pre')
pPos.set_ylim(-1.2, 1.2)
plt.sca(pPos)
plt.tight_layout()
plt.xticks(xindex, tradeTimeIndex, rotation=30) # 旋转15
plt.show()
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""发送策略更新事件,回测中忽略"""
pass
#----------------------------------------------------------------------
def setSlippage(self, slippage):
"""设置滑点点数"""
self.slippage = slippage
#----------------------------------------------------------------------
def setSize(self, size):
"""设置合约大小"""
self.size = size
#----------------------------------------------------------------------
def setRate(self, rate):
"""设置佣金比例"""
self.rate = rate
#----------------------------------------------------------------------
def setPriceTick(self, priceTick):
"""设置价格最小变动"""
self.priceTick = priceTick
#----------------------------------------------------------------------
def runOptimization(self, strategyClass, optimizationSetting):
"""优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 遍历优化
resultList = []
for setting in settingList:
self.clearBacktestingResult()
self.output('-' * 30)
self.output('setting: %s' %str(setting))
self.initStrategy(strategyClass, setting)
self.runBacktesting()
d = self.calculateBacktestingResult()
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
resultList.append(([str(setting)], targetValue))
# 显示结果
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'%s: %s' %(result[0], result[1]))
return result
#----------------------------------------------------------------------
def clearBacktestingResult(self):
"""清空之前回测的结果"""
# 清空限价单相关
self.limitOrderCount = 0
self.limitOrderDict.clear()
self.workingLimitOrderDict.clear()
# 清空停止单相关
self.stopOrderCount = 0
self.stopOrderDict.clear()
self.workingStopOrderDict.clear()
# 清空成交相关
self.tradeCount = 0
self.tradeDict.clear()
#----------------------------------------------------------------------
def runParallelOptimization(self, strategyClass, optimizationSetting):
"""并行优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 多进程优化,启动一个对应CPU核心数量的进程池
pool = multiprocessing.Pool(multiprocessing.cpu_count())
l = []
for setting in settingList:
l.append(pool.apply_async(optimize, (strategyClass, setting,
targetName, self.mode,
self.startDate, self.initDays, self.endDate,
self.slippage, self.rate, self.size,
self.dbName, self.symbol)))
pool.close()
pool.join()
# 显示结果
resultList = [res.get() for res in l]
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'%s: %s' %(result[0], result[1]))
#----------------------------------------------------------------------
def roundToPriceTick(self, price):
"""取整价格到合约最小价格变动"""
if not self.priceTick:
return price
newPrice = round(price/self.priceTick, 0) * self.priceTick
return newPrice
########################################################################
class TradingResult(object):
"""每笔交易的结果"""
#----------------------------------------------------------------------
def __init__(self, entryPrice, entryDt, exitPrice,
exitDt, volume, rate, slippage, size):
"""Constructor"""
        self.entryPrice = entryPrice    # entry price
        self.exitPrice = exitPrice      # exit price
        self.entryDt = entryDt          # entry time, datetime object
        self.exitDt = exitDt            # exit time
        self.volume = volume    # traded volume (+/- indicates direction)
        self.turnover = (self.entryPrice+self.exitPrice)*size*abs(volume)   # turnover (contract value)
        self.commission = self.turnover*rate                                # commission cost
        self.slippage = slippage*2*size*abs(volume)                         # slippage cost
        self.pnl = ((self.exitPrice - self.entryPrice) * volume * size
                    - self.commission - self.slippage)                      # net profit and loss
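# --- Editor's illustrative sketch (invented numbers) ---
# One long round trip on a hypothetical index future: contract size 300,
# commission rate 0.3/10000, slippage 0.2 points per side.
def _demoTradingResult():
    entryDt = datetime(2012, 1, 4, 9, 30)
    exitDt = datetime(2012, 1, 4, 10, 30)
    result = TradingResult(entryPrice=3000.0, entryDt=entryDt,
                           exitPrice=3010.0, exitDt=exitDt,
                           volume=1, rate=0.3/10000, slippage=0.2, size=300)
    # pnl = (3010 - 3000) * 1 * 300 - commission - 0.2 * 2 * 300
    return result.pnl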
########################################################################
class OptimizationSetting(object):
"""优化设置"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.paramDict = OrderedDict()
        self.optimizeTarget = ''            # field used as the optimization target
#----------------------------------------------------------------------
def addParameter(self, name, start, end=None, step=None):
"""增加优化参数"""
if end is None and step is None:
self.paramDict[name] = [start]
return
if end < start:
print u'参数起始点必须不大于终止点'
return
if step <= 0:
print u'参数布进必须大于0'
return
l = []
param = start
while param <= end:
l.append(param)
param += step
self.paramDict[name] = l
#----------------------------------------------------------------------
def generateSetting(self):
"""生成优化参数组合"""
        # list of parameter names
        nameList = self.paramDict.keys()
        paramList = self.paramDict.values()
        # use itertools.product to build all parameter combinations
        productList = list(product(*paramList))
        # pack each combination into a dict and collect the dicts in a list
settingList = []
for p in productList:
d = dict(zip(nameList, p))
settingList.append(d)
return settingList
#----------------------------------------------------------------------
def setOptimizeTarget(self, target):
"""设置优化目标字段"""
self.optimizeTarget = target
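# --- Editor's illustrative sketch ---
# generateSetting() returns the cartesian product of all registered parameter
# ranges; the two ranges below give 2 x 3 = 6 dicts such as
# {'atrLength': 12, 'atrMa': 20}.
def _demoOptimizationSetting():
    demoSetting = OptimizationSetting()
    demoSetting.setOptimizeTarget('capital')
    demoSetting.addParameter('atrLength', 12, 14, 2)   # 12, 14
    demoSetting.addParameter('atrMa', 20, 30, 5)       # 20, 25, 30
    return demoSetting.generateSetting()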
#----------------------------------------------------------------------
def formatNumber(n):
"""格式化数字到字符串"""
rn = round(n, 2) # 保留两位小数
return format(rn, ',') # 加上千分符
#----------------------------------------------------------------------
def optimize(strategyClass, setting, targetName,
mode, startDate, initDays, endDate,
slippage, rate, size,
dbName, symbol):
"""多进程优化时跑在每个进程中运行的函数"""
engine = BacktestingEngine()
engine.setBacktestingMode(mode)
engine.setStartDate(startDate, initDays)
engine.setEndDate(endDate)
engine.setSlippage(slippage)
engine.setRate(rate)
engine.setSize(size)
engine.setDatabase(dbName, symbol)
engine.initStrategy(strategyClass, setting)
engine.runBacktesting()
d = engine.calculateBacktestingResult()
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
return (str(setting), targetValue)
'''2017051200 Modify by hetajen begin'''
from strategy.strategyAtrRsi import AtrRsiStrategy
def getEngine():
engine = BacktestingEngine()
    engine.setBacktestingMode(engine.BAR_MODE)  # backtest on bar data
    engine.setStartDate('20120101')  # start date of the backtest data
    engine.setSlippage(0.2)  # one tick of the index future
    engine.setRate(0.3 / 10000)  # commission rate of 0.3/10000
    engine.setSize(300)  # contract size of the index future
engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
return engine
def getParam(type=0):
if type == 0:
setting = {'atrLength': 11}
else:
        setting = OptimizationSetting()  # create a new optimization task setting
        setting.setOptimizeTarget('capital')  # rank the optimization results by the strategy's net profit
        setting.addParameter('atrLength', 12, 20, 2)  # first optimization parameter atrLength: start 12, end 20, step 2
        setting.addParameter('atrMa', 20, 30, 5)  # second optimization parameter atrMa: start 20, end 30, step 5
        setting.addParameter('rsiLength', 5)  # a parameter with a single fixed value
return setting
def xh_backTesting():
engine = getEngine()
setting = getParam()
engine.initStrategy(AtrRsiStrategy, setting)
    engine.runBacktesting()  # run the backtest
    engine.showBacktestingResult()  # show the backtest results
def xh_optimize():
engine = getEngine()
setting = getParam(1)
    engine.runOptimization(AtrRsiStrategy, setting)  # single-process optimization, takes xxx seconds
    #engine.runParallelOptimization(AtrRsiStrategy, setting)  # multi-process optimization, takes xx seconds
if __name__ == '__main__':
start = time.time()
xh_backTesting()
xh_optimize()
    print u'耗时:%s' % (time.time() - start)  # timing test
'''2017051200 Modify by hetajen end'''
| mit |
kylerbrown/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
aureooms/networkx | examples/algorithms/blockmodel.py | 12 | 3014 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <[email protected]>',
'Aric Hagberg <[email protected]>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
"""Creates hierarchical cluster of graph G from distance matrix"""
path_length=nx.all_pairs_shortest_path_length(G)
distances=numpy.zeros((len(G),len(G)))
for u,p in path_length.items():
for v,d in p.items():
distances[u][v]=d
# Create hierarchical cluster
Y=distance.squareform(distances)
Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
# This partition selection is arbitrary, for illustrive purposes
membership=list(hierarchy.fcluster(Z,t=1.15))
# Create collection of lists for blockmodel
partition=defaultdict(list)
for n,p in zip(list(range(len(G))),membership):
partition[p].append(n)
return list(partition.values())
if __name__ == '__main__':
G=nx.read_edgelist("hartford_drug.edgelist")
# Extract largest connected component into graph H
H = next(nx.connected_component_subgraphs(G))
# Makes life easier to have consecutively labeled integer nodes
H=nx.convert_node_labels_to_integers(H)
# Create parititions with hierarchical clustering
partitions=create_hc(H)
# Build blockmodel graph
BM=nx.blockmodel(H,partitions)
# Draw original graph
pos=nx.spring_layout(H,iterations=100)
fig=plt.figure(1,figsize=(6,10))
ax=fig.add_subplot(211)
nx.draw(H,pos,with_labels=False,node_size=10)
plt.xlim(0,1)
plt.ylim(0,1)
# Draw block model with weighted edges and nodes sized by number of internal nodes
node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
# Set positions to mean of positions of internal nodes from original graph
posBM={}
for n in BM:
xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
posBM[n]=xy.mean(axis=0)
ax=fig.add_subplot(212)
nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.savefig('hartford_drug_block_model.png')
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/testing/compare.py | 11 | 12935 | """
Provides a collection of utilities for comparing (image) results.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import hashlib
import os
import shutil
import numpy as np
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.noseclasses import ImageComparisonFailure
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
from distutils import version
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float(expected, actual, relTol=None, absTol=None):
"""
Fail if the floating point values are not close enough, with
the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(
cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
def _update_converter():
gs, gs_v = matplotlib.checkdep_ghostscript()
if gs_v is not None:
cmd = lambda old, new: \
[gs, '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
cmd = lambda old, new: \
['inkscape', '-z', old, '--export-png', new]
converter['svg'] = make_external_conversion_command(cmd)
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Returns the list of file formats that compare_images can compare
on this system.
"""
return ['png'] + list(six.iterkeys(converter))
def convert(filename, cache):
"""
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
on a hash of the exact contents of the input file. The is no limit
on the size of the cache, so it may need to be manually cleared
periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
raise ImageComparisonFailure(
"Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
#: Maps file extensions to a function which takes a filename as its
#: only argument to return a list suitable for execution with Popen.
#: The purpose of this is so that the result file (with the given
#: extension) can be verified with tools such as xmllint for svg.
verifiers = {}
# Turning this off, because it seems to cause multiprocessing issues
if matplotlib.checkdep_xmllint() and False:
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
def verify(filename):
"""Verify the file through some sort of verification tool."""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah = actual_image.shape
ew, eh = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
num_values = np.prod(expectedImage.shape)
abs_diff_image = abs(expectedImage - actualImage)
# On Numpy 1.6, we can use bincount with minlength, which is much
# faster than using histogram
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
if found_version >= expected_version:
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
else:
histogram = np.histogram(abs_diff_image, bins=np.arange(257))[0]
sum_of_squares = np.sum(histogram * np.arange(len(histogram)) ** 2)
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
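# Illustrative sketch (not part of the original module): a tiny self-check of
# calculate_rms on two synthetic 4x4 "images". The arrays and the expected value
# are made up purely for demonstration.
def _calculate_rms_example():
    a = np.zeros((4, 4), dtype=np.int16)
    b = np.ones((4, 4), dtype=np.int16) * 2
    # every pixel differs by exactly 2, so the RMS error is exactly 2.0
    return calculate_rms(a, b)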
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
    actual : str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
in_decorator : bool
If called from image_comparison decorator, this should be
True. (default=False)
Example
-------
img1 = "./baseline/plot.png"
img2 = "./output/plot.png"
    compare_images(img1, img2, 0.001)
"""
if not os.path.exists(actual):
msg = "Output image %s does not exist." % actual
raise Exception(msg)
if os.stat(actual).st_size == 0:
msg = "Output image file %s is empty." % actual
raise Exception(msg)
verify(actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int(expected)
actualImage = _png.read_png_int(actual)
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
rms = calculate_rms(expectedImage, actualImage)
diff_image = make_test_filename(actual, 'failed-diff')
if rms <= tol:
if os.path.exists(diff_image):
os.unlink(diff_image)
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
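# Illustrative sketch (not part of the original module): how a caller might use
# compare_images with in_decorator=True and inspect the structured result. The
# file paths and the tolerance of 13 are hypothetical placeholders.
def _compare_images_example():
    err = compare_images('baseline/plot.png', 'result_images/plot.png', tol=13,
                         in_decorator=True)
    if err is None:
        return 'images match within tolerance'
    # on failure the dict carries the keys rms, expected, actual, diff and tol
    return 'RMS %(rms)s exceeded tolerance %(tol)s' % err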
def save_diff_image(expected, actual, output):
expectedImage = _png.read_png(expected)
actualImage = _png.read_png(actual)
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(np.float)
actualImage = np.array(actualImage).astype(np.float)
assert expectedImage.ndim == actualImage.ndim
assert expectedImage.shape == actualImage.shape
absDiffImage = abs(expectedImage - actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np.tostring(), width, height, output)
| mit |
ishank08/scikit-learn | sklearn/datasets/tests/test_base.py | 16 | 9390 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_wine():
res = load_wine()
assert_equal(res.data.shape, (178, 13))
assert_equal(res.target.size, 178)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_wine(return_X_y=True)
bunch = load_wine()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
| bsd-3-clause |
lucidfrontier45/scikit-learn | sklearn/utils/tests/test_extmath.py | 2 | 8819 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_true(np.all(mode == mode_result))
assert_true(np.all(score.ravel() == w[:, :5].sum(1)))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
"""Check that extmath.randomized_svd is consistent with linalg.svd"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_randomized_svd_low_rank_with_noise():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
"""Check that transposing the design matrix has limit impact"""
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
    for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
"""Check if cartesian product delivers the right results"""
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
| bsd-3-clause |
DGrady/pandas | asv_bench/benchmarks/io_sql.py | 7 | 4120 | import sqlalchemy
from .pandas_vb_common import *
import sqlite3
from sqlalchemy import create_engine
#-------------------------------------------------------------------------------
# to_sql
class WriteSQL(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
def time_fallback(self):
self.df.to_sql('test1', self.con, if_exists='replace')
def time_sqlalchemy(self):
self.df.to_sql('test1', self.engine, if_exists='replace')
#-------------------------------------------------------------------------------
# read_sql
class ReadSQL(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
self.df.to_sql('test2', self.engine, if_exists='replace')
self.df.to_sql('test2', self.con, if_exists='replace')
def time_read_query_fallback(self):
read_sql_query('SELECT * FROM test2', self.con)
def time_read_query_sqlalchemy(self):
read_sql_query('SELECT * FROM test2', self.engine)
def time_read_table_sqlalchemy(self):
read_sql_table('test2', self.engine)
#-------------------------------------------------------------------------------
# type specific write
class WriteSQLTypes(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.df = DataFrame({'float': randn(10000), 'string': (['foo'] * 10000), 'bool': ([True] * 10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), })
self.df.loc[1000:3000, 'float'] = np.nan
def time_string_fallback(self):
self.df[['string']].to_sql('test_string', self.con, if_exists='replace')
def time_string_sqlalchemy(self):
self.df[['string']].to_sql('test_string', self.engine, if_exists='replace')
def time_float_fallback(self):
self.df[['float']].to_sql('test_float', self.con, if_exists='replace')
def time_float_sqlalchemy(self):
self.df[['float']].to_sql('test_float', self.engine, if_exists='replace')
def time_datetime_sqlalchemy(self):
self.df[['datetime']].to_sql('test_datetime', self.engine, if_exists='replace')
#-------------------------------------------------------------------------------
# type specific read
class ReadSQLTypes(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.df = DataFrame({'float': randn(10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), })
self.df['datetime_string'] = self.df['datetime'].map(str)
self.df.to_sql('test_type', self.engine, if_exists='replace')
self.df[['float', 'datetime_string']].to_sql('test_type', self.con, if_exists='replace')
def time_datetime_read_and_parse_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['datetime_string'], parse_dates=['datetime_string'])
def time_datetime_read_as_native_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['datetime'])
def time_float_read_query_fallback(self):
read_sql_query('SELECT float FROM test_type', self.con)
def time_float_read_query_sqlalchemy(self):
read_sql_query('SELECT float FROM test_type', self.engine)
def time_float_read_table_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['float'])
| bsd-3-clause |
merenlab/anvio | anvio/drivers/MODELLER.py | 1 | 40930 | # coding: utf-8
"""
Interface to MODELLER (https://salilab.org/modeller/).
"""
import os
import anvio
import shutil
import argparse
import subprocess
import pandas as pd
import anvio.utils as utils
import anvio.fastalib as u
import anvio.terminal as terminal
import anvio.constants as constants
import anvio.filesnpaths as filesnpaths
from anvio.drivers import diamond
from anvio.errors import ConfigError, ModellerError, ModellerScriptError, FilesNPathsError
__author__ = "Evan Kiefl"
__copyright__ = "Copyright 2016, The anvio Project"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Evan Kiefl"
__email__ = "[email protected]"
up_to_date_modeller_exec = "mod9.23" # default exec to use
J = lambda x, y: os.path.join(x, y)
class MODELLER:
"""Driver class for MODELLER
This class is a driver to run MODELLER scripts. MODELLER scripts are written
in python 2.3 which is the language MODELLER used when this driver was written.
Parameters
==========
args : argparse.Namespace object
Check __init__ for allowable attributes
target_fasta_path: str
Path to amino acid sequence fasta file with 1 sequence, the gene to be modelled. The defline
        should be an integer (This class will assume this integer is the gene's gene caller id)
directory: str, None
Path to directory that MODELLER will be run in. If None, temp dir will be created
lazy_init : bool, False
If True, check_MODELLER will not be called
skip_warnings : bool, False
If True, all warnings will be suppressed
Notes
=====
- You can add MODELLER scripts by storing them in anvio/data/misc/MODELLER/scripts. Each script
should have its own function in this class. For example, align_to_templates.py is a script
anvi'o has found in that directory and has a corresponding function in this class called
self.run_align_to_templates. Please see that method if you want to add your own script.
"""
def __init__(self, args, target_fasta_path, directory=None, run=terminal.Run(),
lazy_init=False, skip_warnings=False, check_db_only=False):
self.args = args
self.run = run
if skip_warnings and not anvio.DEBUG:
self.run.verbose = False
self.lazy_init = lazy_init
self.check_db_only = check_db_only
self.target_fasta_path = target_fasta_path
self.directory = directory if directory else filesnpaths.get_temp_directory_path()
A = lambda x, t: t(args.__dict__[x]) if x in self.args.__dict__ else None
null = lambda x: x
self.scoring_method = A('scoring_method', str) or 'DOPE_score'
self.very_fast = A('very_fast', bool) or False
self.executable = A('modeller_executable', null) or up_to_date_modeller_exec
self.num_models = A('num_models', int) or 5
self.modeller_database = A('modeller_database', str) or 'pdb_95'
self.max_number_templates = A('max_number_templates', null) or 5
self.percent_cutoff = A('percent_cutoff', null) or 30
self.alignment_fraction_cutoff = A('alignment_fraction_cutoff', null) or 0.80
self.deviation = A('deviation', null) or 4
self.pdb_db_path = A('pdb_db', null)
self.offline_mode = A('offline_mode', null)
# All MODELLER scripts are housed in self.script_folder
self.scripts_folder = constants.default_modeller_scripts_dir
self.alignment_pap_path = None
self.alignment_pir_path = None
self.get_template_path = None
self.target_pir_path = None
self.template_family_matrix_path = None
self.template_info_path = None
self.template_pdb_dir = None
self.model_info = None
self.pdb_db = None
self.use_pdb_db = False
self.logs = {}
self.scripts = {}
# All MODELLER databases are housed in self.database_dir
self.database_dir = constants.default_modeller_database_dir
# store the original directory so we can cd back and forth between
# self.directory and self.start_dir
self.start_dir = os.getcwd()
if self.check_db_only:
self.check_database()
return
self.sanity_check()
self.corresponding_gene_call = self.get_corresponding_gene_call_from_target_fasta_path()
# as reward, whoever called this class will receive self.out when they run self.process()
self.out = {
"templates" : {"pdb_id": [], "chain_id": [], "proper_percent_similarity": [], "percent_similarity": [], "align_fraction":[]},
"models" : {"molpdf": [], "GA341_score": [], "DOPE_score": [], "picked_as_best": []},
"corresponding_gene_call" : self.corresponding_gene_call,
"structure_exists" : False,
"best_model_path" : None,
"best_score" : None,
"scoring_method" : self.scoring_method,
"percent_cutoff" : self.percent_cutoff,
"alignment_fraction_cutoff" : self.alignment_fraction_cutoff,
"very_fast" : self.very_fast,
"deviation" : self.deviation,
"directory" : self.directory,
}
# copy fasta into the working directory
try:
shutil.copy2(self.target_fasta_path, self.directory)
self.target_fasta_path = J(self.directory, self.target_fasta_path)
except shutil.SameFileError:
pass
def get_corresponding_gene_call_from_target_fasta_path(self):
"""corresponding_gene_call is assumed to be the defline of self.args.target_fasta_path"""
target_fasta = u.SequenceSource(self.target_fasta_path, lazy_init=False)
while next(target_fasta):
corresponding_gene_call = target_fasta.id
target_fasta.close()
return corresponding_gene_call
def load_pdb_db(self):
"""Try loading a PDB database with path equal to self.pdb_db_path
Modifies self.pdb_db and self.use_pdb_db
"""
if not self.pdb_db_path:
self.pdb_db_path = constants.default_pdb_database_path
ok_if_absent = True if self.pdb_db_path == constants.default_pdb_database_path else False
if filesnpaths.is_file_exists(self.pdb_db_path, dont_raise=ok_if_absent):
# The user has a database there! Try and load it
self.pdb_db = anvio.structureops.PDBDatabase(argparse.Namespace(pdb_database_path=self.pdb_db_path))
self.pdb_db.check_or_create_db()
self.pdb_db.get_stored_structure_ids()
self.use_pdb_db = True
else:
self.use_pdb_db = False
def process(self):
timer = terminal.Timer()
try:
self.load_pdb_db()
timer.make_checkpoint('PDB DB loaded')
self.run_fasta_to_pir()
timer.make_checkpoint('Converted gene FASTA to PIR')
self.check_database()
timer.make_checkpoint('Checked databases')
self.run_search_and_parse_results()
timer.make_checkpoint('Ran DIAMOND search and parsed hits')
self.get_structures()
timer.make_checkpoint('Obtained template structures')
self.run_align_to_templates(self.list_of_template_code_and_chain_ids)
timer.make_checkpoint('Sequence aligned to templates')
self.run_get_model(self.num_models, self.deviation, self.very_fast)
timer.make_checkpoint('Ran structure predictions')
self.tidyup()
self.pick_best_model()
self.run_add_chain_identifiers_to_best_model()
timer.make_checkpoint('Picked best model and tidied up')
self.out["structure_exists"] = True
except self.EndModeller:
pass
except ModellerScriptError as e:
print(e)
finally:
timer.gen_report(title='ID %s Time Report' % str(self.corresponding_gene_call), run=self.run)
self.abort()
return self.out
def get_structures(self):
"""Populate self.template_pdb_dir with template structure PDBs"""
self.template_pdb_dir = os.path.join(self.directory, "%s_TEMPLATE_PDBS" % str(self.corresponding_gene_call))
filesnpaths.gen_output_directory(self.template_pdb_dir) # does nothing if already exists
pdb_paths = {}
for code, chain in self.list_of_template_code_and_chain_ids:
five_letter_id = code + chain
requested_path = J(self.template_pdb_dir, '%s.pdb' % code)
if self.use_pdb_db and five_letter_id in self.pdb_db.stored_structure_ids:
# This chain exists in the external database. Export it and get the path
try:
path = self.pdb_db.export_pdb(five_letter_id, requested_path)
source = 'Offline DB'
except ConfigError:
# The ID is in the DB, but the PDB content is None
path = None
source = 'Nowhere'
elif not self.offline_mode:
# This chain doesn't exist in an external database, and internet access is assumed.
# We try and download the protein from the RCSB PDB server. If downloading fails,
# path is None
path = utils.download_protein_structure(code, chain=chain, output_path=requested_path, raise_if_fail=False)
source = 'RCSB PDB Server'
else:
# Internet access is not assumed, and the chain wasn't in the external database
path = None
source = 'Nowhere'
self.run.info('%s obtained from' % five_letter_id, source)
if path:
pdb_paths[five_letter_id] = path
# remove templates whose structures are not available
self.list_of_template_code_and_chain_ids = [
(code, chain_code)
for code, chain_code in self.list_of_template_code_and_chain_ids
if code + chain_code in pdb_paths
]
if not len(self.list_of_template_code_and_chain_ids):
self.run.warning("No structures of the homologous proteins (templates) were available. Probably something "
"is wrong. Stopping here.")
raise self.EndModeller
self.run.info("Structures obtained for", ", ".join([code[0]+code[1] for code in self.list_of_template_code_and_chain_ids]))
def sanity_check(self, skip_warnings=False):
# the directory files will be dumped into (can exist but must be empty)
if filesnpaths.is_file_exists(self.directory, dont_raise=True):
filesnpaths.is_output_dir_writable(self.directory)
if not filesnpaths.is_dir_empty(self.directory):
raise ModellerError("You cannot give MODELLER a non-empty directory to work in.")
else:
filesnpaths.gen_output_directory(self.directory)
if not self.lazy_init:
self.executable = check_MODELLER(self.executable)
# does target_fasta_path point to a fasta file?
utils.filesnpaths.is_file_fasta_formatted(self.target_fasta_path)
# make sure target_fasta is valid
target_fasta = u.SequenceSource(self.target_fasta_path, lazy_init=False)
if target_fasta.total_seq != 1:
raise ConfigError("MODELLER :: The input FASTA file must have exactly one sequence. "
"You provided one with {}.".format(target_fasta.total_seq))
try:
while next(target_fasta):
int(target_fasta.id)
except:
raise ConfigError("MODELLER :: The defline of this fasta file must be an integer")
target_fasta.close()
# parameter consistencies
if self.deviation < 0.5 or self.deviation > 20:
self.run.warning("You realize that deviation is given in angstroms, right? You chose {}".format(self.deviation))
if self.very_fast and self.num_models > 1:
self.num_models = 1
self.run.warning("Since you chose --very-fast, there will be little difference, if at all, between models. Anvi'o "
"authoritatively sets --num-models to 1 to save you time.")
def pick_best_model(self):
"""Pick best model based on self.scoring_method and rename to gene_<corresponding_gene_call>.pdb"""
# initialize new model_info column
self.model_info["picked_as_best"] = False
# For these scores, lower is better
        if self.scoring_method in ["molpdf", "DOPE_score"]:
best_basename = self.model_info.loc[self.model_info[self.scoring_method].idxmin(axis=0), "name"]
self.model_info.loc[self.model_info[self.scoring_method].idxmin(axis=0), "picked_as_best"] = True
# For these scores, higher is better
if self.scoring_method == "GA341_score":
best_basename = self.model_info.loc[self.model_info[self.scoring_method].idxmax(axis=0), "name"]
self.model_info.loc[self.model_info[self.scoring_method].idxmax(axis=0), "picked_as_best"] = True
new_best_file_path = J(self.directory, "gene_{}.pdb".format(self.corresponding_gene_call))
os.rename(J(self.directory, best_basename), new_best_file_path)
# append model information to self.out
for model_index in self.model_info.index:
self.out["models"]["molpdf"].append(self.model_info.loc[model_index, "molpdf"])
self.out["models"]["GA341_score"].append(self.model_info.loc[model_index, "GA341_score"])
self.out["models"]["DOPE_score"].append(self.model_info.loc[model_index, "DOPE_score"])
self.out["models"]["picked_as_best"].append(self.model_info.loc[model_index, "picked_as_best"])
# append pdb path to self.out
self.out["best_model_path"] = new_best_file_path
# append the best score to self.out
self.out["best_score"] = self.model_info.loc[self.model_info["picked_as_best"] == True, self.scoring_method]
def abort(self):
"""Gene was not modelled. Return to the starting directory"""
os.chdir(self.start_dir)
def tidyup(self):
"""Tidyup operations after running get_model.py
        Some of the files in here are unnecessary and some of the names are disgusting. Normal models are
        renamed from "2.B99990001.pdb" to "gene_2_Model001.pdb", and the cluster average is renamed from
        "cluster.opt" to "gene_2_ModelAvg.pdb".
"""
if not "get_model.py" in self.scripts.keys():
raise ConfigError("You are out of line calling tidyup without running get_model.py")
# remove all copies of all scripts that were ran
for script_name, file_path in self.scripts.items():
os.remove(file_path)
for model in self.model_info.index:
basename = self.model_info.loc[model, "name"]
# The default names are bad. This is where they are defined
if basename == "cluster.opt":
new_basename = "gene_{}_ModelAvg.pdb".format(self.corresponding_gene_call)
else:
model_num = os.path.splitext(basename)[0][-3:]
new_basename = "gene_{}_Model{}.pdb".format(self.corresponding_gene_call, model_num)
            # rename the files (and reflect changes in self.model_info)
file_path = J(self.directory, basename)
new_file_path = J(self.directory, new_basename)
os.rename(file_path, new_file_path)
self.model_info.loc[model, "name"] = new_basename
def run_add_chain_identifiers_to_best_model(self):
"""Add chain identifier to best model to appease some third-party services"""
script_name = "add_chain_identifiers_to_best_model.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
dir_name, base_name = os.path.split(self.out['best_model_path'])
command = [self.executable,
script_name,
dir_name,
base_name]
self.run_command(command, script_name=script_name)
def run_get_model(self, num_models, deviation, very_fast):
"""Run get model
This is the magic of MODELLER. Based on the template alignment file, the structures of the
templates, and satisfaction of physical constraints, the target protein structure is
modelled without further user input.
"""
script_name = "get_model.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
# model info
self.model_info_path = J(self.directory, "gene_{}_ModelInfo.txt".format(self.corresponding_gene_call))
self.run.info("Number of models", num_models)
self.run.info("Deviation", str(deviation) + " angstroms")
self.run.info("Fast optimization", str(very_fast))
if not deviation and num_models > 1:
raise ConfigError("run_get_modeli :: deviation must be > 0 if num_models > 1.")
command = [self.executable,
script_name,
self.alignment_pir_path,
self.corresponding_gene_call,
self.template_info_path,
str(num_models),
str(deviation),
str(int(very_fast)),
self.model_info_path]
self.run_command(command,
script_name = script_name,
check_output = [self.model_info_path])
# load the model results information as a dataframe
self.model_info = pd.read_csv(self.model_info_path, sep="\t", index_col=False)
self.run.info("Model info", os.path.basename(self.model_info_path))
def run_align_to_templates(self, templates_info):
"""Align the sequence to the best candidate protein sequences
        After identifying the best candidate proteins based on sequence data, this function aligns the
        target protein sequence to them. The resulting alignment file is the main input (besides the
        structures) for homology modelling.
Parameters
==========
templates_info : list of 2-tuples
The zeroth element is the 4-letter protein code and the first element is the chain
number. E.g. [('4sda', 'A'), ('iq8p', 'E')]
"""
script_name = "align_to_templates.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
# First, write ids and chains to file read by align_to_templates.py MODELLER script
self.template_info_path = J(self.directory, "gene_{}_BestTemplateIDs.txt".format(self.corresponding_gene_call))
f = open(self.template_info_path, "w")
for match in templates_info:
f.write("{}\t{}\n".format(match[0], match[1]))
f.close()
# name of the output. .pir is the standard format for MODELLER, .pap is human readable
        # protein_family computes a matrix comparing the different templates against one another
self.alignment_pir_path = J(self.directory, "gene_{}_Alignment.ali".format(self.corresponding_gene_call))
self.alignment_pap_path = J(self.directory, "gene_{}_Alignment.pap".format(self.corresponding_gene_call))
self.template_family_matrix_path = J(self.directory, "gene_{}_ProteinFamily.mat".format(self.corresponding_gene_call))
command = [self.executable,
script_name,
self.target_pir_path,
self.corresponding_gene_call,
self.template_info_path,
self.alignment_pir_path,
self.alignment_pap_path,
self.template_family_matrix_path]
self.run_command(command,
script_name = script_name,
check_output = [self.alignment_pir_path,
self.alignment_pap_path,
self.template_family_matrix_path])
self.run.info("Similarity matrix of templates", os.path.basename(self.template_family_matrix_path))
self.run.info("Target alignment to templates", ", ".join([os.path.basename(self.alignment_pir_path),
os.path.basename(self.alignment_pap_path)]))
def run_search_and_parse_results(self):
"""Align the protein against the database based on only sequence"""
# Change to MODELLER working directory
os.chdir(self.directory)
columns = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gaps', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
driver = diamond.Diamond(
query_fasta=self.target_fasta_path,
target_fasta=J(self.database_dir, self.modeller_database + '.dmnd'),
outfmt=' '.join(['6'] + columns),
run=terminal.Run(verbose=False),
progress=terminal.Progress(verbose=False),
)
driver.blastp()
# Change back to user directory
os.chdir(self.start_dir)
search_df = driver.view_as_dataframe(J(self.directory, driver.tabular_output_path))
matches_found = search_df.shape[0]
if not matches_found:
self.run.warning("No proteins with homologous sequence were found for {}. No structure will be modelled".format(self.corresponding_gene_call))
raise self.EndModeller
# We need the gene length for pident
target_fasta = u.SequenceSource(self.target_fasta_path, lazy_init=False)
while next(target_fasta):
gene_length = len(target_fasta.seq)
# add some useful columns
search_df["code"] = search_df["sseqid"].str[:-1]
search_df["chain"] = search_df["sseqid"].str[-1]
search_df["align_fraction"] = (search_df["length"] - search_df["gaps"]) / gene_length
search_df["proper_pident"] = search_df["pident"] * search_df["align_fraction"]
# Find best match for align fraction and pident
code_chain_id_of_best = tuple(search_df.iloc[search_df['proper_pident'].argmax()][['code', 'chain']].values)
best_hit = search_df.loc[
(search_df['code'] == code_chain_id_of_best[0]) & \
(search_df['chain'] == code_chain_id_of_best[1]), ['pident', 'align_fraction']
].iloc[0]
# filter results by self.percent_cutoff and self.alignment_fraction_cutoff
search_df = search_df[search_df["pident"] >= self.percent_cutoff]
search_df = search_df[search_df["align_fraction"] >= self.alignment_fraction_cutoff]
# Rank by the alignment fraction times the percent id
search_df = search_df.sort_values("proper_pident", ascending=False)
# If more than 1 template in 1 PDB id, just choose 1
search_df = search_df.drop_duplicates('code', keep='first')
matches_after_filter = len(search_df)
if not matches_after_filter:
self.run.warning("Gene {} did not have a search result with percent identicalness above or equal "
"to {}% and alignment fraction above {}%. The best match was chain {} of https://www.rcsb.org/structure/{}, which had a "
"percent identicalness of {:.2f}% and an alignment fraction of {:.3f}. No structure will be modelled.".\
format(self.corresponding_gene_call,
self.percent_cutoff,
self.alignment_fraction_cutoff,
code_chain_id_of_best[1],
code_chain_id_of_best[0],
best_hit['pident'],
best_hit['align_fraction']))
raise self.EndModeller
# Filter out templates with proper_pident more than 5% less than best match
# http://merenlab.org/2018/09/04/getting-started-with-anvi-structure/#how-much-do-templates-matter
search_df = search_df[search_df['proper_pident'] >= (search_df['proper_pident'].max() - 5)]
# get up to self.modeller.max_number_templates of those with the highest proper_ident scores.
search_df = search_df.iloc[:min([len(search_df), self.max_number_templates])]
# Get their chain and 4-letter ids
self.list_of_template_code_and_chain_ids = list(zip(search_df["code"], search_df["chain"]))
self.run.info("Max number of templates allowed", self.max_number_templates)
self.run.info("Number of candidate templates", matches_found)
self.run.info("After >{}% identical filter".format(self.percent_cutoff), matches_after_filter)
self.run.info("Number accepted as templates", len(self.list_of_template_code_and_chain_ids))
# update user on which templates are used, and write the templates to self.out
for i in range(len(self.list_of_template_code_and_chain_ids)):
pdb_id, chain_id = self.list_of_template_code_and_chain_ids[i]
proper_percent_similarity = search_df["proper_pident"].iloc[i]
percent_similarity = search_df["pident"].iloc[i]
align_fraction = search_df["align_fraction"].iloc[i]
self.out["templates"]["pdb_id"].append(pdb_id)
self.out["templates"]["chain_id"].append(chain_id)
self.out["templates"]["proper_percent_similarity"].append(proper_percent_similarity)
self.out["templates"]["percent_similarity"].append(percent_similarity)
self.out["templates"]["align_fraction"].append(align_fraction)
self.run.info("Template {}".format(i+1),
"Protein ID: {}, Chain {} ({:.1f}% identical, {:.2f} align fraction)".format(pdb_id, chain_id, percent_similarity, align_fraction))
def check_database(self):
"""Setup the database files
Downloads the .pir file if it is missing
Binarizes .pir file if .bin is missing
Creates the .dmnd file if it is missing
"""
bin_db_path = J(self.database_dir, self.modeller_database + ".bin")
pir_db_path = J(self.database_dir, self.modeller_database + ".pir")
bin_exists = utils.filesnpaths.is_file_exists(bin_db_path, dont_raise=True)
pir_exists = utils.filesnpaths.is_file_exists(pir_db_path, dont_raise=True)
if bin_exists and pir_exists:
# We good
pass
else:
if not pir_exists:
# Download .pir
self.run.warning("Anvi'o looked in {} for a database with the name {} and with an extension "
"of either .bin or .pir, but didn't find anything matching that "
"criteria. Anvi'o will try and download the best database it knows of from "
"https://salilab.org/modeller/downloads/pdb_95.pir.gz and use that. "
"You can checkout https://salilab.org/modeller/ for more info about the pdb_95 "
"database".format(self.database_dir, self.modeller_database))
db_download_path = os.path.join(self.database_dir, "pdb_95.pir.gz")
utils.download_file("https://salilab.org/modeller/downloads/pdb_95.pir.gz", db_download_path)
utils.run_command(['gzip', '-d', db_download_path], log_file_path=filesnpaths.get_temp_file_path())
# Binarize .pir (make .bin)
self.run.warning("Your database is not in binary format. That means accessing its contents is slower "
"than it could be. Anvi'o is going to make a binary format. Just FYI")
self.run_binarize_database(pir_db_path, bin_db_path)
dmnd_db_path = J(self.database_dir, self.modeller_database + '.dmnd')
if os.path.exists(dmnd_db_path):
return
self.run.warning("Your diamond database does not exist. It will be created.")
script_name = "pir_to_fasta.py"
self.copy_script_to_directory(script_name)
input_pir_path = J(self.database_dir, self.modeller_database + '.pir')
fasta_path = J(self.database_dir, self.modeller_database + '.fa')
dmnd_path = J(self.database_dir, self.modeller_database)
command = [self.executable,
script_name,
input_pir_path,
fasta_path]
self.run_command(command,
script_name=script_name,
rename_log=False)
temp = u.FastaOutput(filesnpaths.get_temp_file_path())
fasta = u.SequenceSource(fasta_path)
while next(fasta):
temp.write_id(fasta.id)
temp.write_seq(fasta.seq.replace('-', '').replace('.', 'X'))
shutil.move(temp.output_file_path, fasta_path)
fasta.close()
temp.close()
driver = diamond.Diamond(
query_fasta=fasta_path,
run=terminal.Run(verbose=False),
progress=terminal.Progress(verbose=False),
)
driver.makedb(output_file_path=dmnd_path)
os.remove(fasta_path)
def run_binarize_database(self, pir_db_path, bin_db_path):
"""Binarizes a .pir file
Databases can be read in .pir format, but can be more quickly read in binarized format. This
does that.
Parameters
==========
pir_db_path : str
Path to existing .pir file
bin_db_path : str
Path to the will-be-made .bin file
"""
script_name = "binarize_database.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
command = [self.executable,
script_name,
pir_db_path,
bin_db_path]
self.run_command(command,
script_name=script_name,
check_output=[bin_db_path],
rename_log=False)
self.run.info("New database", bin_db_path)
def copy_script_to_directory(self, script_name, add_to_scripts_dict=True, directory=None):
"""Copy source script to working directory
All MODELLER scripts are housed in anvio/data/misc/MODELLER/scripts/. This function checks
that script_name is in anvio/data/misc/MODELLER/scripts/ and then copies the script into
self.directory. Why copy into self.directory? Whenever a script is ran by MODELLER, a log
file is output in the directory of the script. By copying the script into self.directory,
the log is written there instead of anvio/data/misc/MODELLER/scripts/.
"""
if not directory:
directory = self.directory
script_path = J(self.scripts_folder, script_name)
try:
utils.filesnpaths.is_file_exists(script_path)
except:
raise ConfigError("MODELLER :: The script {} is not in {}".format(script_name, self.scripts_folder))
# add script to scripts dictionary
if add_to_scripts_dict:
self.scripts[script_name] = J(directory, script_name)
# If all is well, copy script to directory
shutil.copy2(script_path, directory)
def run_fasta_to_pir(self):
"""Convert a fasta file to a pir format.
MODELLER uses their own .pir format for search and alignment instead of .fasta. This script
does the conversion. An example pir formatted sequence shown here:
>P1;TvLDH
sequence:TvLDH:::::::0.00: 0.00
MSEAAHVLITGAAGQIGYILSHWIASGELYGDRQVYLHLLDIPPAMNRLTALTMELEDCAFPHLAGFVATTDPKA
AFKDIDCAFLVASMPLKPGQVRADLISSNSVIFKNTGEYLSKWAKPSVKVLVIGNPDNTNCEIAMLHAKNLKPEN
FSSLSMLDQNRAYYEVASKLGVDVKDVHDIIVWGNHGESMVADLTQATFTKEGKTQKVVDVLDHDYVFDTFFKKI
GHRAWDILEHRGFTSAASPTKAAIQHMKAWLFGTAPGEVLSMGIPVPEGNPYGIKPGVVFSFPCNVDKEGKIHVV
EGFKVNDWLREKLDFTEKDLFHEKEIALNHLAQGG*
You can find more details via https://salilab.org/modeller/tutorial/basic.html
"""
script_name = "fasta_to_pir.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
# name pir file by the corresponding_gene_call (i.e. defline of the fasta)
self.target_pir_path = J(self.directory, "{}.pir".format(self.corresponding_gene_call))
command = [self.executable,
script_name,
self.target_fasta_path,
self.target_pir_path]
self.run_command(command,
script_name = script_name,
check_output = [self.target_pir_path])
self.run.info("Target alignment file", os.path.basename(self.target_pir_path))
def run_command(self, command, script_name, check_output=None, rename_log=True):
"""Base routine for running MODELLER scripts
Parameters
==========
command : list of strs
E.g. ['mod921', 'test_script.py', 'input1', 'input2'] corresponds to the command line
"mod9.21 test_script.py input1 input2"
script_name : str
E.g. 'test_script.py'
check_output : list, None
Verify that this list of filepaths exist after the command is ran
rename_log : bool, True
MODELLER outputs a log that is renamed to reflect the command and gene used
"""
# first things first, we CD into MODELLER's directory
os.chdir(self.directory)
# try and execute the command
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
# if MODELLER script gave a traceback, it is caught here and everything is stopped
if process.returncode:
error = error.decode('utf-8').strip()
error = "\n" + "\n".join(error.split('\n'))
print(terminal.c(error, color='red'))
if not self.check_db_only:
self.out["structure_exists"] = False
raise ModellerScriptError("The MODELLER script {} did not execute properly. Hopefully it is clear "
"from the above error message. No structure is going to be modelled."\
.format(script_name))
# If we made it this far, the MODELLER script ran to completion. Now check outputs exist
if check_output:
for output in check_output:
utils.filesnpaths.is_file_exists(output)
# MODELLER outputs a log that we rename right here, right now
old_log_name = os.path.splitext(script_name)[0] + ".log"
if rename_log:
new_log_name = "gene_{}_{}".format(self.corresponding_gene_call, old_log_name)
os.rename(old_log_name, new_log_name)
else:
new_log_name = old_log_name
# add to logs
self.logs[script_name] = new_log_name
self.run.info("Log of {}".format(script_name), new_log_name)
# last things last, we CD back into the starting directory
os.chdir(self.start_dir)
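    def run_example_script(self):
        """Illustrative sketch only; not part of the original driver
        This shows the pattern described in the class docstring for wiring up a new
        MODELLER script: `my_new_script.py` is a hypothetical file assumed to live in
        anvio/data/misc/MODELLER/scripts, and the output path is a made-up placeholder.
        Copy the script into the working directory, build the command, then delegate
        to self.run_command, mirroring the other run_* methods.
        """
        script_name = "my_new_script.py"
        # check script exists, then copy the script into the working directory
        self.copy_script_to_directory(script_name)
        # hypothetical output file checked for existence after the script runs
        example_output_path = J(self.directory, "gene_{}_ExampleOutput.txt".format(self.corresponding_gene_call))
        command = [self.executable,
                   script_name,
                   self.target_pir_path,
                   example_output_path]
        self.run_command(command,
                         script_name=script_name,
                         check_output=[example_output_path])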
class EndModeller(Exception):
pass
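# Illustrative sketch (not part of the original module): a minimal end-to-end use of
# the MODELLER driver, assuming a single-sequence FASTA file ('gene_42.fa') whose
# defline is a gene caller id. The file name and namespace values are hypothetical.
def _example_modeller_usage():
    args = argparse.Namespace(num_models=1, very_fast=True, modeller_database='pdb_95')
    modeller = MODELLER(args, target_fasta_path='gene_42.fa')
    results = modeller.process()
    # process() always returns the summary dict, even when modelling was aborted
    if results['structure_exists']:
        return results['best_model_path']
    return None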
def check_MODELLER(executable=None):
"""Test if MODELLER is going to work.
    Checks that the executable exists, that a license exists, and that it can produce the expected
    output of a modeller executable. Exists outside of the class MODELLER so it does not have to be
    checked every time the class is initialized.
Parameters
==========
executable : str, None
The string representation of a binary MODELLER program. E.g "mod9.21". If None,
up_to_date_modeller_exec is chosen and tested.
Returns
=======
executable : str
Returns the executable that you _should_ use, which is not necessarily what is input
"""
executable = executable if executable else up_to_date_modeller_exec
scripts_folder = J(os.path.dirname(anvio.__file__), 'data/misc/MODELLER/scripts')
if utils.filesnpaths.is_dir_empty(scripts_folder):
raise ConfigError("Anvi'o houses all its MODELLER scripts in %s, but your directory "
"contains no scripts. Why you did dat?" % scripts_folder)
try:
utils.is_program_exists(executable)
except ConfigError:
*prefix, sub_version = up_to_date_modeller_exec.split('.')
prefix, sub_version = ''.join(prefix), int(sub_version)
for alternate_version in reversed(range(sub_version - 10, sub_version + 10)):
alternate_program = prefix + '.' + str(alternate_version)
if utils.is_program_exists(alternate_program, dont_raise=True):
executable = alternate_program
break
else:
raise ConfigError("Anvi'o needs a MODELLER program to be installed on your system. You didn't specify one "
"(which can be done with `--modeller-executable`), so anvi'o tried the most recent version "
"it knows about: '%s'. If you are certain you have it on your system (for instance you can run it "
"by typing '%s' in your terminal window), you may want to send a detailed bug report. If you "
"don't have it on your system, check out these installation instructions on our website: "
"http://merenlab.org/2016/06/18/installing-third-party-software/#modeller" % (executable, executable))
temp_dir = filesnpaths.get_temp_directory_path()
shutil.copy2(J(scripts_folder, 'fasta_to_pir.py'), temp_dir)
test_script = J(temp_dir, 'fasta_to_pir.py')
test_input = J(os.path.dirname(anvio.__file__), 'tests/sandbox/mock_data_for_structure/proteins.fa')
test_output = J(temp_dir, 'test_out')
command = [executable,
test_script,
test_input,
test_output]
# try and execute the command
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
if process.returncode:
# modeller has failed
error = error.decode('utf-8').strip()
is_licence_key_error = True if error.find('Invalid license key') > -1 else False
if is_licence_key_error:
# its a valid modeller program with no license key
license_target_file = error.split('\n')[-1]
raise ConfigError("You're making progress and anvi'o is proud of you! You just need to validate your MODELLER "
"with a license key (it's free). Please go to https://salilab.org/modeller/registration.html "
"to register for a new license. After you receive an e-mail with your key, please open '%s' "
"and replace the characters XXXXX with your own key. Save the file and try again. " % license_target_file)
else:
error = "\n" + "\n".join(error.split('\n'))
print(terminal.c(error, color='red'))
raise ConfigError("The executable you requested is called `%s`, but anvi'o doesn't agree with you that "
"it is a working MODELLER program. That was determined by running the command `%s`, which raised the "
"error seen above. If you want to specify a specific MODELLER program, you can specify it with "
"`--modeller-executable`." % (executable, " ".join(command)))
# no error was raised. now check if output file exists
try:
filesnpaths.is_file_exists(test_output)
except FilesNPathsError:
raise ConfigError("The executable you requested is called `%s`, but anvi'o doesn't agree with you that "
"it is a working MODELLER program. That was determined by running the command `%s`, which did not "
"output the file expected. If you want to specify a specific MODELLER program, you can specify it with "
"`--modeller-executable`." % (executable, " ".join(command)))
return executable
| gpl-3.0 |
shahankhatch/scikit-learn | sklearn/decomposition/dict_learning.py | 104 | 44632 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
# Transposing product to ensure Fortran ordering
gram = np.dot(dictionary, dictionary.T).T
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
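# --- Added illustrative sketch (not part of the original scikit-learn file).
# A minimal, hedged example of calling sparse_encode above with a random
# unit-norm dictionary; the shapes and parameter values are arbitrary
# assumptions chosen purely for demonstration.
def _demo_sparse_encode():
    rng = np.random.RandomState(0)
    D = rng.randn(15, 64)                      # (n_components, n_features)
    D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]   # unit-norm atoms
    X = rng.randn(10, 64)                      # (n_samples, n_features)
    code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=5)
    return code.shape                          # expected: (10, 15)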
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
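# --- Added illustrative sketch (not part of the original scikit-learn file).
# One call to dict_learning above on a small random matrix, illustrating the
# factorization described in its docstring; sizes and the alpha value are
# arbitrary assumptions for demonstration only.
def _demo_dict_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 8)
    code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
                                             max_iter=20, method='lars',
                                             random_state=rng)
    return code.shape, dictionary.shape, len(errors)   # (20, 5), (5, 8)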
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
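# --- Added illustrative sketch (not part of the original scikit-learn file).
# The online solver above consumed in a single call; batch_size, n_iter and
# the matrix shape are arbitrary assumptions for demonstration.
def _demo_dict_learning_online():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 12)
    code, dictionary = dict_learning_online(X, n_components=6, alpha=1.,
                                            n_iter=20, batch_size=5,
                                            random_state=rng)
    return code.shape, dictionary.shape     # (50, 6), (6, 12)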
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
        normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
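# --- Added illustrative sketch (not part of the original scikit-learn file).
# Encoding a few signals against a fixed, unit-norm random dictionary with
# the SparseCoder estimator above; all sizes are assumptions.
def _demo_sparse_coder():
    rng = np.random.RandomState(0)
    D = rng.randn(10, 30)
    D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]   # unit-norm atoms
    coder = SparseCoder(dictionary=D, transform_algorithm='lasso_lars',
                        transform_alpha=0.1)
    X = rng.randn(4, 30)
    return coder.transform(X)               # shape (4, 10)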
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
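# --- Added illustrative sketch (not part of the original scikit-learn file).
# Fitting the DictionaryLearning estimator above and encoding the training
# data; n_components, max_iter and the data shape are arbitrary assumptions.
def _demo_dictionary_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 16)
    dico = DictionaryLearning(n_components=8, alpha=1., max_iter=10,
                              transform_n_nonzero_coefs=3, random_state=rng)
    code = dico.fit(X).transform(X)
    return dico.components_.shape, code.shape   # (8, 16), (30, 8)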
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
            performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
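# --- Added illustrative sketch (not part of the original scikit-learn file).
# Feeding MiniBatchDictionaryLearning above with successive mini-batches via
# partial_fit; batch sizes and n_iter are arbitrary assumptions.
def _demo_minibatch_partial_fit():
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=8, alpha=1., n_iter=5,
                                       random_state=rng)
    for _ in range(3):
        dico.partial_fit(rng.randn(10, 16))
    return dico.components_.shape           # (8, 16)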
| bsd-3-clause |
ChinaQuants/zipline | zipline/utils/data.py | 31 | 12761 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import numpy as np
import pandas as pd
from copy import deepcopy
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
"""
Preallocation strategies for rolling window over expanding data set
Restrictions: major_axis can only be a DatetimeIndex for now
"""
def __init__(self,
window,
items,
sids,
cap_multiple=2,
dtype=np.float64,
initial_dates=None):
self._pos = window
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.dtype = dtype
if initial_dates is None:
self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT
elif len(initial_dates) != window:
raise ValueError('initial_dates must be of length window')
else:
self.date_buf = np.hstack(
(
initial_dates,
np.empty(
window * (cap_multiple - 1),
dtype='datetime64[ns]',
),
),
)
self.buffer = self._create_buffer()
@property
def cap(self):
return self.cap_multiple * self._window
@property
def _start_index(self):
return self._pos - self._window
@property
def start_date(self):
return self.date_buf[self._start_index]
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :]
def set_minor_axis(self, minor_axis):
self.minor_axis = _ensure_index(minor_axis)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def set_items(self, items):
self.items = _ensure_index(items)
self.buffer = self.buffer.reindex(items=self.items)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def extend_back(self, missing_dts):
"""
        Extend the window backwards to cover the dates in missing_dts,
        resizing the buffers and filling the new frames with NaNs.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
'missing_dts must be a non-empty index',
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
self.date_buf[where] = missing_dts
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
values = frame
if isinstance(frame, pd.DataFrame):
values = frame.values
self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
self.date_buf[self._pos] = tick
self._pos += 1
def get_current(self, item=None, raw=False, start=None, end=None):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
item_indexer = slice(None)
if item:
item_indexer = self.items.get_loc(item)
start_index = self._start_index
end_index = self._pos
        # get initial date window
where = slice(start_index, end_index)
current_dates = self.date_buf[where]
def convert_datelike_to_long(dt):
if isinstance(dt, pd.Timestamp):
return dt.asm8
if isinstance(dt, datetime.datetime):
return np.datetime64(dt)
return dt
# constrict further by date
if start:
start = convert_datelike_to_long(start)
start_index += current_dates.searchsorted(start)
if end:
end = convert_datelike_to_long(end)
_end = current_dates.searchsorted(end, 'right')
end_index -= len(current_dates) - _end
where = slice(start_index, end_index)
values = self.buffer.values[item_indexer, where, :]
current_dates = self.date_buf[where]
if raw:
# return copy so we can change it without side effects here
return values.copy()
major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
if values.ndim == 3:
return pd.Panel(values, self.items, major_axis, self.minor_axis,
dtype=self.dtype)
elif values.ndim == 2:
return pd.DataFrame(values, major_axis, self.minor_axis,
dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._start_index, self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
@property
def window_length(self):
return self._window
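# --- Added illustrative sketch (not part of the original zipline file).
# Filling a RollingPanel with a few random frames and reading the in-view
# window back; the field names, sids and window length are assumptions.
def _demo_rolling_panel():
    rng = np.random.RandomState(0)
    items, sids = ['price', 'volume'], [1, 2, 3]
    rp = RollingPanel(window=3, items=items, sids=sids)
    for dt in pd.date_range('2014-01-01', periods=5):
        frame = pd.DataFrame(rng.randn(len(items), len(sids)),
                             index=items, columns=sids)
        rp.add_frame(dt, frame)
    return rp.get_current()                 # Panel holding the last 3 frames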
class MutableIndexRollingPanel(object):
"""
A version of RollingPanel that exists for backwards compatibility with
batch_transform. This is a copy to allow behavior of RollingPanel to drift
away from this without breaking this class.
This code should be considered frozen, and should not be used in the
future. Instead, see RollingPanel.
"""
def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64):
self._pos = 0
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.cap = cap_multiple * window
self.dtype = dtype
self.date_buf = np.empty(self.cap, dtype='M8[ns]')
self.buffer = self._create_buffer()
def _oldest_frame_idx(self):
return max(self._pos - self._window, 0)
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._oldest_frame_idx(), :]
return self.buffer.iloc[:, self._oldest_frame_idx(), :]
def set_sids(self, sids):
self.minor_axis = _ensure_index(sids)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def get_current(self):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
where = slice(self._oldest_frame_idx(), self._pos)
major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
return pd.Panel(self.buffer.values[:, where, :], self.items,
major_axis, self.minor_axis, dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._oldest_frame_idx(), self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._oldest_frame_idx(), self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
if isinstance(frame, pd.DataFrame):
minor_axis = frame.columns
items = frame.index
if set(minor_axis).difference(set(self.minor_axis)) or \
set(items).difference(set(self.items)):
self._update_buffer(frame)
vals = frame.T.astype(self.dtype)
self.buffer.loc[:, self._pos, :] = vals
self.date_buf[self._pos] = tick
self._pos += 1
def _update_buffer(self, frame):
# Get current frame as we only need to care about the data that is in
# the active window
old_buffer = self.get_current()
if self._pos >= self._window:
# Don't count the last major_axis entry if we're past our window,
# since it's about to roll off the end of the panel.
old_buffer = old_buffer.iloc[:, 1:, :]
nans = pd.isnull(old_buffer)
# Find minor_axes that have only nans
# Note that minor is axis 2
non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))])
# Determine new columns to be added
new_cols = set(frame.columns).difference(non_nan_cols)
# Update internal minor axis
self.minor_axis = _ensure_index(new_cols.union(non_nan_cols))
# Same for items (fields)
# Find items axes that have only nans
# Note that items is axis 0
non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))])
new_items = set(frame.index).difference(non_nan_items)
self.items = _ensure_index(new_items.union(non_nan_items))
# :NOTE:
# There is a simpler and 10x faster way to do this:
#
# Reindex buffer to update axes (automatically adds nans)
# self.buffer = self.buffer.reindex(items=self.items,
# major_axis=np.arange(self.cap),
# minor_axis=self.minor_axis)
#
# However, pandas==0.12.0, for which we remain backwards compatible,
# has a bug in .reindex() that this triggers. Using .update() as before
# seems to work fine.
new_buffer = self._create_buffer()
new_buffer.update(
self.buffer.loc[non_nan_items, :, non_nan_cols])
self.buffer = new_buffer
| apache-2.0 |
PennyDreadfulMTG/Penny-Dreadful-Tools | decksite/charts/chart.py | 1 | 2940 | import os.path
import pathlib
from typing import Dict
import matplotlib as mpl
# This has to happen before pyplot is imported to avoid needing an X server to draw the graphs.
# pylint: disable=wrong-import-position
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from decksite.data import deck
from shared import configuration, logger
from shared.pd_exception import DoesNotExistException, OperationalException
def cmc(deck_id: int, attempts: int = 0) -> str:
if attempts > 3:
msg = 'Unable to generate cmc chart for {id} in 3 attempts.'.format(id=deck_id)
logger.error(msg)
raise OperationalException(msg)
path = determine_path(str(deck_id) + '-cmc.png')
if acceptable_file(path):
return path
d = deck.load_deck(deck_id)
costs: Dict[str, int] = {}
for ci in d.maindeck:
c = ci.card
if c.is_land():
continue
if c.mana_cost is None:
cost = '0'
elif next((s for s in c.mana_cost if '{X}' in s), None) is not None:
cost = 'X'
else:
converted = int(float(c.cmc))
cost = '7+' if converted >= 7 else str(converted)
costs[cost] = ci.get('n') + costs.get(cost, 0)
path = image(path, costs)
if acceptable_file(path):
return path
return cmc(deck_id, attempts + 1)
def image(path: str, costs: Dict[str, int]) -> str:
ys = ['0', '1', '2', '3', '4', '5', '6', '7+', 'X']
xs = [costs.get(k, 0) for k in ys]
sns.set_style('white')
sns.set(font='Concourse C3', font_scale=3)
g = sns.barplot(x=ys, y=xs, palette=['#cccccc'] * len(ys)) # pylint: disable=no-member
g.axes.yaxis.set_ticklabels([])
rects = g.patches
sns.set(font='Concourse C3', font_scale=2)
for rect, label in zip(rects, xs):
if label == 0:
continue
height = rect.get_height()
g.text(rect.get_x() + rect.get_width() / 2, height + 0.5, label, ha='center', va='bottom')
g.margins(y=0, x=0)
sns.despine(left=True, bottom=True)
g.get_figure().savefig(path, transparent=True, pad_inches=0, bbox_inches='tight')
plt.clf() # Clear all data from matplotlib so it does not persist across requests.
return path
def determine_path(name: str) -> str:
charts_dir = configuration.get_str('charts_dir')
pathlib.Path(charts_dir).mkdir(parents=True, exist_ok=True)
if not os.path.exists(charts_dir):
raise DoesNotExistException('Cannot store graph images because {charts_dir} does not exist.'.format(charts_dir=charts_dir))
return os.path.join(charts_dir, name)
def acceptable_file(path: str) -> bool:
if not os.path.exists(path):
return False
if os.path.getsize(path) >= 6860: # This is a few bytes smaller than a completely empty graph on prod.
return True
logger.warning('Chart at {path} is suspiciously small.'.format(path=path))
return False
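# A minimal sketch (not part of the original decksite module) showing how the
# image helper above can be exercised with a hand-written mana curve. The
# output filename is a hypothetical value chosen only for illustration.
def _demo_image() -> str:
    costs = {'1': 8, '2': 12, '3': 9, '4': 4, '7+': 1}
    return image('demo-cmc.png', costs)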
| gpl-3.0 |
ajdawson/windspharm | examples/iris/rws_example.py | 1 | 2190 | """Compute Rossby wave source from the long-term mean flow.
This example uses the iris interface.
Additional requirements for this example:
* iris (http://scitools.org.uk/iris/)
* matplotlib (http://matplotlib.org/)
* cartopy (http://scitools.org.uk/cartopy/)
"""
import warnings
import cartopy.crs as ccrs
import iris
import iris.plot as iplt
from iris.coord_categorisation import add_month
import matplotlib as mpl
import matplotlib.pyplot as plt
from windspharm.iris import VectorWind
from windspharm.examples import example_data_path
mpl.rcParams['mathtext.default'] = 'regular'
# Read zonal and meridional wind components from file using the iris module.
# The components are in separate files. We catch warnings here because the
# files are not completely CF compliant.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
uwnd = iris.load_cube(example_data_path('uwnd_mean.nc'))
vwnd = iris.load_cube(example_data_path('vwnd_mean.nc'))
uwnd.coord('longitude').circular = True
vwnd.coord('longitude').circular = True
# Create a VectorWind instance to handle the computations.
w = VectorWind(uwnd, vwnd)
# Compute components of rossby wave source: absolute vorticity, divergence,
# irrotational (divergent) wind components, gradients of absolute vorticity.
eta = w.absolutevorticity()
div = w.divergence()
uchi, vchi = w.irrotationalcomponent()
etax, etay = w.gradient(eta)
etax.units = 'm**-1 s**-1'
etay.units = 'm**-1 s**-1'
# Combine the components to form the Rossby wave source term.
S = eta * -1. * div - (uchi * etax + vchi * etay)
S.coord('longitude').attributes['circular'] = True
# Pick out the field for December at 200 hPa.
time_constraint = iris.Constraint(month='Dec')
add_month(S, 'time')
S_dec = S.extract(time_constraint)
# Plot Rossby wave source.
clevs = [-30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30]
ax = plt.subplot(111, projection=ccrs.PlateCarree(central_longitude=180))
fill = iplt.contourf(S_dec * 1e11, clevs, cmap=plt.cm.RdBu_r, extend='both')
ax.coastlines()
ax.gridlines()
plt.colorbar(fill, orientation='horizontal')
plt.title('Rossby Wave Source ($10^{-11}$s$^{-1}$)', fontsize=16)
plt.show()
| mit |
gkulkarni/JetMorphology | fitjet_3d.py | 1 | 5370 | """
File: fitjet_3d.py
Fits a geometric model to mock jet data. Uses image subtraction;
otherwise same as fitjet.py
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.optimize as op
import emcee
import triangle
import sys
# These mock data are produced by jet3d.py.
a2 = np.fromfile('mockdata_3d_nc100.dat',dtype=np.float32)
def I(theta):
a, b, i, l, alpha, beta, gamma = theta
u = np.linspace(0.0, 20.0*np.pi, 1000)
def z(u):
return (a/(2.0*np.pi)) * u * (u/(2.0*np.pi))**beta
zv = z(u)
def x(u):
return (z(u)**-alpha) * (b/(2.0*np.pi)) * u * np.cos(u)
def y(u):
return (z(u)**-alpha) * (b/(2.0*np.pi)) * u * np.sin(u)
xv = x(u)
yv = y(u)
def ri(i):
return np.matrix([[np.cos(i), 0.0, np.sin(i)],[0.0, 1.0, 0.0],[-np.sin(i), 0.0, np.cos(i)]])
def rl(l):
return np.matrix([[np.cos(l), -np.sin(l), 0.0],[np.sin(l), np.cos(l), 0.0],[0.0, 0.0, 1.0]])
zvarr = zv*gamma
iarr = zvarr/zvarr.max()
iarr *= np.pi/2.0
c = np.dstack((xv, yv, zv))
c = np.squeeze(c)
d = np.zeros((1000,3))
lm = rl(l)
for n in range(1000):
d[n] = c[n]*ri(iarr[n])*lm
xv = d[:,0]
yv = d[:,1]
xv = xv[~np.isnan(xv)]
yv = yv[~np.isnan(yv)]
nc = 100
a = np.zeros((nc,nc),dtype=np.float32)
zl = xv.min() - 5.0
zu = xv.max() + 5.0
yl = yv.min() - 5.0
yu = yv.max() + 5.0
lz = zu - zl
ly = yu - yl
dz = lz/nc
dy = -ly/nc # Because "y" coordinate increases in opposite direction to "y" array index of a (or a2).
def zloc(cood):
return int((cood-zl)/dz) + 1
def yloc(cood):
return int((cood-yl)/dy) + 1
for i in xrange(xv.size):
zpos = zloc(xv[i])
ypos = yloc(yv[i])
a[ypos, zpos] += 1.0
return a.flatten()
def neglnlike(theta, intensity, intensity_err):
model = I(theta)
inv_sigma2 = 1.0/intensity_err**2
return 0.5*(np.sum((intensity-model)**2*inv_sigma2 - np.log(inv_sigma2)))
a2_err = np.zeros_like(a2)
a2_err += 0.1
theta_guess = (0.1, 10.0, 2.0, 3.0, 0.2, 2.0, 0.5)
result = op.minimize(neglnlike, theta_guess, args=(a2, a2_err), method='Nelder-Mead')
print result.x
print result.success
def lnprior(theta):
a, b, i, l, alpha, beta, gamma = theta
if (0.05 < a < 0.15 and
8.0 < b < 12.0 and
1.0 < i < 3.0 and
2.0 < l < 4 and
0.1 < alpha < 0.3 and
1.0 < beta < 3.0 and
0.3 < gamma < 0.7):
return 0.0
return -np.inf
def lnprob(theta, intensity, intensity_err):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp - neglnlike(theta, intensity, intensity_err)
ndim, nwalkers = 7, 100
pos = [result.x + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(a2, a2_err))
sampler.run_mcmc(pos, 500)
samples = sampler.chain[:, 100:, :].reshape((-1, ndim))
plot_chain = True
if plot_chain:
mpl.rcParams['font.size'] = '10'
nplots = 7
plot_number = 0
fig = plt.figure(figsize=(12, 6), dpi=100)
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
ax.plot(sampler.chain[i,:,0], c='k', alpha=0.1)
ax.axhline(result.x[0], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$A$')
ax.set_xticklabels('')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
ax.plot(sampler.chain[i,:,1], c='k', alpha=0.1)
ax.axhline(result.x[1], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel('$B$')
ax.set_xticklabels('')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
ax.plot(sampler.chain[i,:,2], c='k', alpha=0.1)
ax.axhline(result.x[2], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$i_0$')
ax.set_xticklabels('')
plot_number += 1
ax = fig.add_subplot(nplots, 1, plot_number)
for i in range(nwalkers):
ax.plot(sampler.chain[i,:,3], c='k', alpha=0.1)
ax.axhline(result.x[3], c='#CC9966', dashes=[7,2], lw=2)
ax.set_ylabel(r'$\lambda_0$')
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,4], c='k', alpha=0.1)
    ax.axhline(result.x[4], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$\alpha$')
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,5], c='k', alpha=0.1)
    ax.axhline(result.x[5], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$\beta$')
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,6], c='k', alpha=0.1)
    ax.axhline(result.x[6], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$\gamma$')
ax.set_xlabel('step')
plt.savefig('chains.pdf',bbox_inches='tight')
mpl.rcParams['font.size'] = '14'
fig = triangle.corner(samples, labels=['$A$', '$B$', '$i_0$', r'$\lambda_0$', r'$\alpha$', r'$\beta$', r'$\gamma$'],
truths=result.x)
fig.savefig("triangle.pdf")
| mit |
ahoyosid/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
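# Illustrative usage sketch (not part of the backport itself): compare a direct
# smallest-algebraic eigsh call against shift-invert about sigma=0 on a small
# symmetric tridiagonal matrix. The *_demo names are placeholders introduced
# only for this example.
if __name__ == "__main__":
    n_demo = 10
    A_demo = (2.0 * np.eye(n_demo)
              - np.eye(n_demo, k=1)
              - np.eye(n_demo, k=-1))
    vals_direct = eigsh(A_demo, k=2, which='SA', return_eigenvectors=False)
    vals_shift = eigsh(A_demo, k=2, sigma=0, which='LM',
                       return_eigenvectors=False)
    assert np.allclose(np.sort(vals_direct), np.sort(vals_shift))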
| bsd-3-clause |
sdbonin/SOQresearch | SOQswapRK4.py | 1 | 8364 | # -*- coding: utf-8 -*-
"""
This code uses a loop along with our set of coupled differential equations and
matrix math to create arrays of 4-vector quaternions.
The old plotting functions need to be updated and incorporated into the end of
this code or a better visualization solution needs to be found.
"""
#------------------------------------------------------------------------------
# Importing modules and copying functions
# AKA "Setting stuff up"
#------------------------------------------------------------------------------
import numpy as np
from time import time as checktime
# the four quaternion basis elements (1, i, j, k) as 2x2 complex matrices, used for building general q-matrices
rm = np.identity(2)
im = np.array([[-1j,0],[0,1j]])
jm = np.array([[0,1],[-1,0]])
km = np.array([[0,-1j],[-1j,0]])
def vec_mat(v):
'''
Converts a quaternion vector into the 2x2 imaginary matrix representation
'''
return v[0]*rm + v[1]*im + v[2]*jm + v[3]*km
def mat_vec(M):
'''
Converts a 2x2 imaginary matrix quaternion into its vector representation
'''
return np.array([ M[1,1].real , M[1,1].imag , M[0,1].real , -M[0,1].imag ])
def qvecmult(vec1,vec2):
'''
Multiplies two 4-vector quaternions via matrix math
'''
return mat_vec(np.dot(vec_mat(vec1),vec_mat(vec2)))
def qmatcon(M):
'''
conjugates a 2x2 imaginary matrix quaternion
'''
return vec_mat(mat_vec(M)*np.array([1,-1,-1,-1]))
def qveccon(vec):
'''
conjugates 4-vector quaternion
'''
return vec*np.array([1,-1,-1,-1])
def qvecnorm(vec):
'''
normalizes a 4-vector quaternion
'''
return vec/np.sqrt(qvecmult(qveccon(vec),vec)[0])
def qmatnorm(M):
'''
piggy-backs off the previous function to normalize 2x2 imaginary matrices
'''
return vec_mat(qvecnorm(mat_vec(M)))
def qvecmagsqr(vec):
'''
returns the magnitude squared of a 4-vector quaternion
'''
return qvecmult(qveccon(vec),vec)[0]
def qmatmagsqr(M):
'''
piggy-backs off the previous function to give the magnitude squared of 2x2 imaginary matrix
quaternions
'''
return qvecmagsqr(mat_vec(M))
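# Quick sanity checks on the helpers above (an added sketch; the underscore
# names are throwaway test vectors): the product i*j should equal k, and
# mat_vec should invert vec_mat.
_i = np.array([0., 1., 0., 0.])
_j = np.array([0., 0., 1., 0.])
_k = np.array([0., 0., 0., 1.])
assert np.allclose(qvecmult(_i, _j), _k)
_v = np.array([1., 2., 3., 4.])
assert np.allclose(mat_vec(vec_mat(_v)), _v)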
#------------------------------------------------------------------------------
# Defining the differential equations
# AKA "Bringing (first) order to the universe"
#------------------------------------------------------------------------------
def q1_dot(q1,q2,p1,p2,a):
'''
takes the current value of things that we know and calculates derivatives
Function assumes 2x2 complex matrices as inputs for q1,q2,p1,p2
a is the coupling constant
'''
return (p1 - a*np.dot(q1,np.dot(qmatcon(q2),p2))) \
#/(1. - qmatmagsqr(q1)*qmatmagsqr(q2)*a**2)
def p1_dot(q1,q2,q1dot,q2dot,a,w):
'''
takes the current values of things we know and the hopefully recently
calculated derivatives of q1,q2 and uses them to find other derivatives
'''
return a*np.dot(q1dot,np.dot(qmatcon(q2dot),q2)) - q1*w**2
#------------------------------------------------------------------------------
# Defining necessary constants and initial conditions
# AKA "on the first day..."
#------------------------------------------------------------------------------
w = 1. # \omega_0 in our notation
a = 0.01 # coupling constant. \alpha in our notation
print 'alpha =',a
seed = 42
np.random.seed(seed)
print 'seed =',seed
q1 = vec_mat([1,0,0,0])
q2 = vec_mat([1,0,0,0])
p1 = np.random.rand(4)
p2 = np.random.rand(4)
p1[0] = 0
p2[0] = 0
p1 = vec_mat(p1)
p2 = vec_mat(p2)
q1 = qmatnorm(q1)
q2 = qmatnorm(q2)
p1 = qmatnorm(p1)
p2 = qmatnorm(p2)
#------------------------------------------------------------------------------
# Defining loop parameters
# AKA "Configuring the space-time continuum"
#------------------------------------------------------------------------------
dt = 0.01 #time step
t = 0
print 'dt = ',dt
q1a = [mat_vec(q1)]
p1a = [mat_vec(p1)]
s1a = [mat_vec(np.dot(qmatcon(p1),q1))]
q2a = [mat_vec(q2)]
p2a = [mat_vec(p2)]
s2a = [mat_vec(np.dot(qmatcon(p2),q2))]
time = [t]
swaptime = 0.8785/a #determined 'experimentally'
#------------------------------------------------------------------------------
# Checking conserved quantity
# AKA "might as well..."
#------------------------------------------------------------------------------
con = [] #checking to see if our conserved quantity is actually conserved
def conserved(q1,q2,p1,p2):
return np.dot(qmatcon(p1),q1) + np.dot(qmatcon(p2),q2)
#------------------------------------------------------------------------------
# Creating the time loop
# AKA "Let 'er rip"
#------------------------------------------------------------------------------
runtime = checktime()
while t<swaptime:
'''
This integrator works on an RK4 algorithm.
For a good explanation, see Wikipedia
https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
note that the algorithm is modified slightly to fit our function
'''
q1k1 = q1_dot(q1,q2,p1,p2,a)
q2k1 = q1_dot(q2,q1,p2,p1,a)
p1k1 = p1_dot(q1,q2,q1k1,q2k1,a,w)
p2k1 = p1_dot(q2,q1,q2k1,q1k1,a,w)
q1k2 = q1_dot(q1+q1k1*dt/2.,q2+q2k1*dt/2.,p1+p1k1*dt/2.,p2+p2k1*dt/2.,a)
q2k2 = q1_dot(q2+q2k1*dt/2.,q1+q1k1*dt/2.,p2+p2k1*dt/2.,p1+p1k1*dt/2.,a)
p1k2 = p1_dot(q1+q1k1*dt/2.,q2+q2k1*dt/2.,q1k1,q2k1,a,w)
p2k2 = p1_dot(q2+q2k1*dt/2.,q1+q1k1*dt/2.,q2k1,q1k1,a,w)
q1k3 = q1_dot(q1+q1k2*dt/2.,q2+q2k2*dt/2.,p1+p1k2*dt/2.,p2+p2k2*dt/2.,a)
q2k3 = q1_dot(q2+q2k2*dt/2.,q1+q1k2*dt/2.,p2+p2k2*dt/2.,p1+p1k2*dt/2.,a)
p1k3 = p1_dot(q1+q1k2*dt/2.,q2+q2k2*dt/2.,q1k1,q2k1,a,w)
p2k3 = p1_dot(q2+q2k2*dt/2.,q1+q1k2*dt/2.,q2k1,q1k1,a,w)
q1k4 = q1_dot(q1+q1k3*dt,q2+q2k3*dt,p1+p1k3*dt,p2+p2k3*dt,a)
q2k4 = q1_dot(q2+q2k3*dt,q1+q1k3*dt,p2+p2k3*dt,p1+p1k3*dt,a)
p1k4 = p1_dot(q1+q1k3*dt,q2+q2k3*dt,q1k1,q2k1,a,w)
p2k4 = p1_dot(q2+q2k3*dt,q1+q1k3*dt,q2k1,q1k1,a,w)
q1 += (q1k1 + 2*q1k2 + 2*q1k3 + q1k4)*dt/6.
q2 += (q2k1 + 2*q2k2 + 2*q2k3 + q2k4)*dt/6.
p1 += (p1k1 + 2*p1k2 + 2*p1k3 + p1k4)*dt/6.
p2 += (p2k1 + 2*p2k2 + 2*p2k3 + p2k4)*dt/6.
t += dt
q1a.append(mat_vec(q1))
p1a.append(mat_vec(p1))
s1a.append(mat_vec(np.dot(qmatcon(p1),q1)))
q2a.append(mat_vec(q2))
p2a.append(mat_vec(p2))
s2a.append(mat_vec(np.dot(qmatcon(p2),q2)))
time.append(t)
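con.append(mat_vec(conserved(q1,q2,p1,p2))) # track the conserved quantity at each step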
runtime = checktime() - runtime
q1a = np.array(q1a)
q2a = np.array(q2a)
p1a = np.array(p1a)
p2a = np.array(p2a)
s1a = np.array(s1a)
s2a = np.array(s2a)
time = np.array(time)
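con = np.array(con)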
#------------------------------------------------------------------------------
# Plotting things
# AKA "Can we see it now?"
#------------------------------------------------------------------------------
import matplotlib.pyplot as plt
def vecplot(thing,time,name):
plt.clf()
plt.title(name)
plt.plot(time,thing[:,0],label='Real', color = 'black')
plt.plot(time,thing[:,1],label='i', color = 'red')
plt.plot(time,thing[:,2],label='j', color = 'green')
plt.plot(time,thing[:,3],label='k', color = 'blue')
plt.legend(loc='best')
plt.xlim([time[0], time[-1]])
plt.grid()
plt.show()
def scalarplot(thing,time,name):
plt.clf()
plt.title(name)
plt.plot(time,thing,color = 'black')
plt.grid()
plt.xlim([time[0], time[-1]])
plt.show()
vecplot(q1a,time,'$q_1$')
vecplot(q2a,time,'$q_2$')
vecplot(p1a,time,'$p_1$')
vecplot(p2a,time,'$p_2$')
vecplot(s1a,time,'$p_1^{\dagger}q_1$')
vecplot(s2a,time,'$p_2^{\dagger}q_2$')
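vecplot(con,time,'$p_1^{\dagger}q_1+p_2^{\dagger}q_2$ (conserved)')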
print 'Initial:'
print 'q1 = ', q1a[0]
print 'q2 = ', q2a[0]
print 'p1 = ', p1a[0]
print 'p2 = ', p2a[0]
print 's1 = ', s1a[0]
print 's2 = ', s2a[0]
print 'Final:'
print 'q1 = ', q1a[-1]
print 'q2 = ', q2a[-1]
print 'p1 = ', p1a[-1]
print 'p2 = ', p2a[-1]
print 's1 = ', s1a[-1]
print 's2 = ', s2a[-1]
print 'runtime is',runtime, 'seconds'
| mit |