blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
efceb7bfde0ca3da44a812a43f838b7ac79170bb | 79eb159b3ee36eb76bd921be24081708f44ac735 | /tests/test_codec.py | d9f73bcd0a34d29503f034d3199e6d5a2172f9d8 | [] | no_license | osh/PyAV | d7139f8faf7ee0973376db807e3b917863e9fb73 | 5fa85fd142ee8dabf01f4873e29678aeca153b4f | refs/heads/master | 2021-01-18T13:24:52.202662 | 2015-12-11T04:01:52 | 2015-12-11T04:01:52 | 47,802,016 | 1 | 0 | null | 2015-12-11T03:12:18 | 2015-12-11T03:12:18 | null | UTF-8 | Python | false | false | 620 | py | from .common import *
from av.codec import Codec
from av.video.format import VideoFormat
class TestCodecs(TestCase):
    """Tests for PyAV's Codec metadata lookup."""

    def test_codec_mpeg4(self):
        """Looking up 'mpeg4' yields a video codec with the expected metadata."""
        c = Codec('mpeg4')
        self.assertEqual(c.name, 'mpeg4')
        self.assertEqual(c.long_name, 'MPEG-4 part 2')
        self.assertEqual(c.type, 'video')
        # 13 is presumably FFmpeg's AV_CODEC_ID for MPEG-4 part 2 -- TODO confirm
        # against the FFmpeg version PyAV is built against.
        self.assertEqual(c.id, 13)
        self.assertTrue(c.is_encoder)
        self.assertTrue(c.is_decoder)
        # The codec must advertise at least one pixel format, including yuv420p.
        formats = c.video_formats
        self.assertTrue(formats)
        self.assertIsInstance(formats[0], VideoFormat)
        self.assertTrue(any(f.name == 'yuv420p' for f in formats))
| [
"[email protected]"
] | |
2614cf1f44792beeb55c2a2e4257282366b8da9c | f33b30743110532ddae286ba1b34993e61669ab7 | /869. Reordered Power of 2.py | 9fecfa97156c715493e1bcf2e58aab9b47cf8034 | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | class Solution:
def reorderedPowerOf2(self, N):
"""
:type N: int
:rtype: bool
"""
from collections import Counter
c = Counter(str(N))
return any(c == Counter(str(1 << i)) for i in range(32))
a = Solution()
print(a.reorderedPowerOf2(16))
| [
"[email protected]"
] | |
100195dfd715236cf3362301bc411a12a0be41c5 | 693567f042c6bd93ecdda41cb5db81c55ccf3158 | /List/swap two elements in a list (another approach).py | c013b3bb504c2f734752ab41623c4161c62e0bf9 | [] | no_license | supriyo-pal/Python-Practice | 5806e0045ebfeb04856246a245430e2ab7921ba9 | 2025369f0d23d603ad27eaff149500137e98dbcf | refs/heads/main | 2023-01-25T05:31:58.404283 | 2020-12-09T19:08:22 | 2020-12-09T19:08:22 | 317,021,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 21:57:25 2020
@author: Supriyo
"""
# Read a string of digits; each character becomes one list element below.
number=input("enter the numbers between 0-9:")
number_list=list()
length=0
# Copy the input characters one by one, tracking the running length.
for i in range(len(number)):
    number_list.append(number[i])
    length=len(number_list)
# Ask the user for the two (1-based) positions to swap.
print("Choose one position between 0 to",length)
pos1=int(input())
print("Choose another position except ",pos1)
pos2=int(input())
def swapPositions(list, pos1, pos2):
    """Swap the elements at indices ``pos1`` and ``pos2`` in place.

    Works for any index order (pos1 < pos2, pos1 > pos2, or equal) --
    the previous pop/insert implementation silently corrupted the list
    whenever pos1 > pos2, and the caller passes positions in whatever
    order the user typed them.

    :param list: the list to modify (parameter name kept for backward
        compatibility; note it shadows the ``list`` builtin).
    :param pos1: index of the first element.
    :param pos2: index of the second element.
    :return: the same list object, mutated.
    """
    # Tuple assignment swaps atomically, no pops/inserts needed.
    list[pos1], list[pos2] = list[pos2], list[pos1]
    return list
# Driver function: convert the user's 1-based positions to 0-based
# indices and print the swapped list.
print(swapPositions(number_list, pos1-1, pos2-1))
"[email protected]"
] | |
f735e541e2852a473ab392064cf9429ac1a90ffc | 0db19410e9751790af8ce4a0a9332293e379c02f | /configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py | b4263f25e741e25a0ec5b85900ff1b2587d2805d | [
"Apache-2.0"
] | permissive | open-mmlab/mmpose | 2c9986521d35eee35d822fb255e8e68486026d94 | 537bd8e543ab463fb55120d5caaa1ae22d6aaf06 | refs/heads/main | 2023-08-30T19:44:21.349410 | 2023-07-04T13:18:22 | 2023-07-04T13:18:22 | 278,003,645 | 4,037 | 1,171 | Apache-2.0 | 2023-09-14T09:44:55 | 2020-07-08T06:02:55 | Python | UTF-8 | Python | false | false | 6,656 | py | _base_ = ['../../../_base_/default_runtime.py']
# runtime
max_epochs = 420
stage2_num_epochs = 30
base_lr = 4e-3
train_cfg = dict(max_epochs=max_epochs, val_interval=10)
randomness = dict(seed=21)
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.),
paramwise_cfg=dict(
norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0e-5,
by_epoch=False,
begin=0,
end=1000),
dict(
# use cosine lr from 210 to 420 epoch
type='CosineAnnealingLR',
eta_min=base_lr * 0.05,
begin=max_epochs // 2,
end=max_epochs,
T_max=max_epochs // 2,
by_epoch=True,
convert_to_iter_based=True),
]
# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=1024)
# codec settings
codec = dict(
type='SimCCLabel',
input_size=(192, 256),
sigma=(4.9, 5.66),
simcc_split_ratio=2.0,
normalize=False,
use_dark=False)
# model settings
model = dict(
type='TopdownPoseEstimator',
data_preprocessor=dict(
type='PoseDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
_scope_='mmdet',
type='CSPNeXt',
arch='P5',
expand_ratio=0.5,
deepen_factor=0.33,
widen_factor=0.5,
out_indices=(4, ),
channel_attention=True,
norm_cfg=dict(type='SyncBN'),
act_cfg=dict(type='SiLU'),
init_cfg=dict(
type='Pretrained',
prefix='backbone.',
checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
'rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth' # noqa
)),
head=dict(
type='RTMCCHead',
in_channels=512,
out_channels=17,
input_size=codec['input_size'],
in_featuremap_size=(6, 8),
simcc_split_ratio=codec['simcc_split_ratio'],
final_layer_kernel_size=7,
gau_cfg=dict(
hidden_dims=256,
s=128,
expansion_factor=2,
dropout_rate=0.,
drop_path=0.,
act_fn='SiLU',
use_rel_bias=False,
pos_enc=False),
loss=dict(
type='KLDiscretLoss',
use_target_weight=True,
beta=10.,
label_softmax=True),
decoder=codec),
test_cfg=dict(flip_test=True))
# base dataset settings
dataset_type = 'HumanArtDataset'
data_mode = 'topdown'
data_root = 'data/'
backend_args = dict(backend='local')
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# f'{data_root}': 's3://openmmlab/datasets/detection/coco/',
# f'{data_root}': 's3://openmmlab/datasets/detection/coco/'
# }))
# pipelines
train_pipeline = [
dict(type='LoadImage', backend_args=backend_args),
dict(type='GetBBoxCenterScale'),
dict(type='RandomFlip', direction='horizontal'),
dict(type='RandomHalfBody'),
dict(
type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80),
dict(type='TopdownAffine', input_size=codec['input_size']),
dict(type='mmdet.YOLOXHSVRandomAug'),
dict(
type='Albumentation',
transforms=[
dict(type='Blur', p=0.1),
dict(type='MedianBlur', p=0.1),
dict(
type='CoarseDropout',
max_holes=1,
max_height=0.4,
max_width=0.4,
min_holes=1,
min_height=0.2,
min_width=0.2,
p=1.),
]),
dict(type='GenerateTarget', encoder=codec),
dict(type='PackPoseInputs')
]
val_pipeline = [
dict(type='LoadImage', backend_args=backend_args),
dict(type='GetBBoxCenterScale'),
dict(type='TopdownAffine', input_size=codec['input_size']),
dict(type='PackPoseInputs')
]
train_pipeline_stage2 = [
dict(type='LoadImage', backend_args=backend_args),
dict(type='GetBBoxCenterScale'),
dict(type='RandomFlip', direction='horizontal'),
dict(type='RandomHalfBody'),
dict(
type='RandomBBoxTransform',
shift_factor=0.,
scale_factor=[0.75, 1.25],
rotate_factor=60),
dict(type='TopdownAffine', input_size=codec['input_size']),
dict(type='mmdet.YOLOXHSVRandomAug'),
dict(
type='Albumentation',
transforms=[
dict(type='Blur', p=0.1),
dict(type='MedianBlur', p=0.1),
dict(
type='CoarseDropout',
max_holes=1,
max_height=0.4,
max_width=0.4,
min_holes=1,
min_height=0.2,
min_width=0.2,
p=0.5),
]),
dict(type='GenerateTarget', encoder=codec),
dict(type='PackPoseInputs')
]
# data loaders
train_dataloader = dict(
batch_size=256,
num_workers=10,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_mode=data_mode,
ann_file='HumanArt/annotations/training_humanart_coco.json',
data_prefix=dict(img=''),
pipeline=train_pipeline,
))
val_dataloader = dict(
batch_size=64,
num_workers=10,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_mode=data_mode,
ann_file='HumanArt/annotations/validation_humanart.json',
# bbox_file=f'{data_root}HumanArt/person_detection_results/'
# 'HumanArt_validation_detections_AP_H_56_person.json',
data_prefix=dict(img=''),
test_mode=True,
pipeline=val_pipeline,
))
test_dataloader = val_dataloader
# hooks
default_hooks = dict(
checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='mmdet.PipelineSwitchHook',
switch_epoch=max_epochs - stage2_num_epochs,
switch_pipeline=train_pipeline_stage2)
]
# evaluators
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'HumanArt/annotations/validation_humanart.json')
test_evaluator = val_evaluator
| [
"[email protected]"
] | |
1cc3c3e0a40e800b3eca55bc1f2adf1f5bbcee2a | 0fb867b48b5a0bd88f9fefb5cdcad0b4abe720b6 | /calculator.spec | 646bfee63aa626e392df8c449574bd17d9edbe61 | [] | no_license | sparshjaincs/Simple-Calculator | c010181d0ad0bc09719f813e6d91f7b87d990d5d | 76c597c2e59a806c8d8a93ad8b798288639e7da1 | refs/heads/master | 2020-06-21T17:10:16.012591 | 2019-07-18T19:15:22 | 2019-07-18T19:15:22 | 197,511,687 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['calculator.py'],
pathex=['G:\\Projects\\Calculator'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='calculator',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=True )
| [
"[email protected]"
] | |
eb07d2a3f8f793245785b8e732d7d785d49671b6 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/scipy/interpolate/fitpack2.py | 0f14d84f30435f315adac039526c16ae5d5cd92f | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:422178a9adf00766a95a781c4d8e1837d120bc65542ddd936c9f14decc375ae8
size 62749
| [
"[email protected]"
] | |
f046f12d7b3f16ea03cc78bebd1b08607193f082 | c086a38a366b0724d7339ae94d6bfb489413d2f4 | /PythonEnv/Lib/site-packages/pythonwin/pywin/framework/editor/frame.py | 9e74114d102460a9401b98c1320ac20636a4a733 | [] | no_license | FlowkoHinti/Dionysos | 2dc06651a4fc9b4c8c90d264b2f820f34d736650 | d9f8fbf3bb0713527dc33383a7f3e135b2041638 | refs/heads/master | 2021-03-02T01:14:18.622703 | 2020-06-09T08:28:44 | 2020-06-09T08:28:44 | 245,826,041 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | # frame.py - The MDI frame window for an editor.
import pywin.framework.window
import win32ui
import win32con
import afxres
from . import ModuleBrowser
class EditorFrame(pywin.framework.window.MDIChildWnd):
    """MDI child frame hosting a Pythonwin editor document.

    The client area is a static splitter: a module-browser pane on the
    left, and a nested vertical splitter on the right holding two editor
    views of the same document.
    """

    def OnCreateClient(self, cp, context):
        """Build the splitter layout and its three views for the document."""
        # Create the default view as specified by the template (ie, the editor view)
        view = context.template.MakeView(context.doc)
        # Create the browser view.
        browserView = ModuleBrowser.BrowserView(context.doc)
        # A second editor view on the same document, for the split editor.
        view2 = context.template.MakeView(context.doc)
        # Outer splitter: 1 row x 2 columns (browser | editors).
        splitter = win32ui.CreateSplitter()
        style = win32con.WS_CHILD | win32con.WS_VISIBLE
        splitter.CreateStatic(self, 1, 2, style, win32ui.AFX_IDW_PANE_FIRST)
        # Inner splitter: 2 rows x 1 column, nested in the outer's second pane.
        sub_splitter = self.sub_splitter = win32ui.CreateSplitter()
        sub_splitter.CreateStatic(splitter, 2, 1, style, win32ui.AFX_IDW_PANE_FIRST + 1)
        # Note we must add the default view first, so that doc.GetFirstView() returns the editor view.
        sub_splitter.CreateView(view, 1, 0, (0, 0))
        splitter.CreateView(browserView, 0, 0, (0, 0))
        sub_splitter.CreateView(view2, 0, 0, (0, 0))
        ## print "First view is", context.doc.GetFirstView()
        ## print "Views are", view, view2, browserView
        ## print "Parents are", view.GetParent(), view2.GetParent(), browserView.GetParent()
        ## print "Splitter is", splitter
        ## print "sub splitter is", sub_splitter
        ## Old
        ## splitter.CreateStatic (self, 1, 2)
        ## splitter.CreateView(view, 0, 1, (0,0)) # size ignored.
        ## splitter.CreateView (browserView, 0, 0, (0, 0))
        # Restrict the size of the browser splitter (and we can avoid filling
        # it until it is shown)
        splitter.SetColumnInfo(0, 10, 20)
        # And the active view is our default view (so it gets initial focus)
        self.SetActiveView(view)

    def GetEditorView(self):
        """Return an editor (scintilla) view -- the taller of the two
        split editor panes, i.e. the one the user has opened the most."""
        # In a multi-view (eg, splitter) environment, get
        # an editor (ie, scintilla) view
        # Look for the splitter opened the most!
        if self.sub_splitter is None:
            return self.GetDlgItem(win32ui.AFX_IDW_PANE_FIRST)
        v1 = self.sub_splitter.GetPane(0, 0)
        v2 = self.sub_splitter.GetPane(1, 0)
        r1 = v1.GetWindowRect()
        r2 = v2.GetWindowRect()
        # Window rects are (left, top, right, bottom): compare pane heights.
        if r1[3] - r1[1] > r2[3] - r2[1]:
            return v1
        return v2

    def GetBrowserView(self):
        """Return the module-browser view of the active document."""
        # XXX - should fix this :-) (assumes the browser is always view index 1)
        return self.GetActiveDocument().GetAllViews()[1]

    def OnClose(self):
        """Prompt to save, tear down the browser, then close the frame.

        Returns 0 to veto the close when the user cancels the save dialog.
        """
        doc = self.GetActiveDocument()
        if not doc.SaveModified():
            ## Cancel button selected from Save dialog, do not actually close
            ## print 'close cancelled'
            return 0
        ## So the 'Save' dialog doesn't come up twice
        doc._obj_.SetModifiedFlag(False)
        # Must force the module browser to close itself here (OnDestroy for the view itself is too late!)
        self.sub_splitter = None # ensure no circles!
        self.GetBrowserView().DestroyBrowser()
        return self._obj_.OnClose()
"="
] | = |
a2a2518930512317c83f34ef6273bff3efd67fe4 | 88a54c5e2cf3d16e5288261a37840428bf6c4834 | /src/article_loader.py | 8654a31e9a45bdbf8fdbf8d3c4253eac3d4185af | [] | no_license | VitalyRomanov/document-clustering | f2fa1c617ef8f4e2ba69ba0c152d80c919361b25 | 412a21b857b79a644f77b728b8798dda9e854e29 | refs/heads/master | 2022-04-07T22:04:30.804892 | 2018-02-03T18:50:25 | 2018-02-03T18:50:25 | 104,849,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,666 | py | import json
import os
# import pickle as p
import joblib as p
from datetime import datetime
import urllib.request
import numpy as np
def date2int(date):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string to an integer Unix timestamp.

    The parsed datetime is naive, so .timestamp() interprets it in the
    local timezone, matching how the rest of this module handles dates.
    """
    parsed = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
    return int(parsed.timestamp())
def get_date(ts):
return datetime.fromtimestamp(
int(repr(ts))
).strftime('%Y-%m-%d %H:%M:%S')
def load_latest():
dump_file = "articles_dump.dat"
l_time = 1509031277
if os.path.isfile(dump_file):
articles = p.load(open(dump_file, "rb"))
else:
articles = []
return articles
# def retreive_articles(l_time):
# data = json.load(open('1509031277.json'))
# # retreive articles' dates
# dates = list(map(date2int, map(lambda x: x['public_date'], data)))
# # sort articles by date
# s_ind = sorted(range(len(dates)), key=lambda k: dates[k])
# s_data = [data[ind] for ind in s_ind]
# return s_data
def retreive_articles_url(time):
    """Fetch article records newer than ``time`` from the business-gazeta API.

    :param time: integer Unix timestamp of the newest record already held
    :return: list of article record dicts sorted ascending by 'public_date'
    """
    # NOTE(review): function name keeps the original spelling ("retreive")
    # because callers elsewhere use it.
    url_addr = "https://www.business-gazeta.ru/index/monitoring/timestamp/%d" % time
    data = None
    with urllib.request.urlopen(url_addr) as url:
        data = json.loads(url.read().decode())
    # Convert each record's 'public_date' string into an integer timestamp.
    dates = list(map(date2int, map(lambda x: x['public_date'], data)))
    # Argsort: indices of dates in ascending order, then reorder the records.
    s_ind = sorted(range(len(dates)), key=lambda k: dates[k])
    s_data = [data[ind] for ind in s_ind]
    return s_data
def post_json(data_json):
    """POST a JSON string to the 'similar' endpoint and print the response.

    :param data_json: a JSON document as a str; UTF-8 encoded before sending
    """
    url_addr = "https://www.business-gazeta.ru/index/similar"
    enc_json = data_json.encode('utf-8')
    # Explicit content-type header so the server parses the body as JSON.
    req = urllib.request.Request(url_addr, data=enc_json,
                                 headers={'content-type': 'application/json'})
    response = urllib.request.urlopen(req)
    # NOTE(review): the response handle is never closed; body is only printed.
    print(response.read())
# def get_last_time(articles):
# return articles[-1] if len(articles) != 0 else 0
# latest = 0
# for article in articles:
# candidate = date2int(article['public_date'])
# if candidate > latest:
# latest = candidate
# return latest
def get_sections(s_data):
    """Split article records into parallel per-field lists.

    :param s_data: iterable of article dicts with keys 'id', 'title',
        'content', 'public_date' and 'link'
    :return: tuple (ids, titles, content, dates, links); dates are the
        integer timestamps produced by date2int
    """
    ids = [article['id'] for article in s_data]
    titles = [article['title'] for article in s_data]
    content = [article['content'] for article in s_data]
    dates = [date2int(article['public_date']) for article in s_data]
    links = [article['link'] for article in s_data]
    return ids, titles, content, dates, links
class AData:
    """In-memory store of article records with incremental loading.

    Articles are kept as parallel lists (ids, titles, content, dates,
    links), sorted ascending by publication date; dates are integer
    Unix timestamps.
    """

    # Parallel per-article lists (populated in __init__).
    ids = None
    titles = None
    content = None
    dates = None
    links = None
    # Two days, in seconds: sec * min * hr * 2 days.
    _TWO_DAYS = 60 * 60 * 24 * 2
    # Source domains whose articles are dropped by the blacklist filter.
    _BLACKLIST = ('realnoevremya.ru', 'tatcenter.ru')

    def __init__(self):
        """Initialize from the on-disk dump (empty store if none exists)."""
        self.ids = []
        self.titles = []
        self.content = []
        self.dates = []
        self.links = []
        articles_data = get_sections(load_latest())
        self.join_sections(articles_data)
        self._latest = self.get_last_time()
        self.new = len(self.ids)

    def load_new(self):
        """Fetch and append articles published after the newest stored one."""
        self._latest = self.get_last_time()
        self.new = len(self.ids)
        print("Retreiving after %s" % get_date(self._latest), end=": ")
        new_articles = retreive_articles_url(self._latest)
        articles_data = get_sections(new_articles)
        self.join_sections(articles_data)
        self.new = len(new_articles)
        if self.new == 0:
            print("Nothing new")
        else:
            print("%d added" % self.new)

    def join_sections(self, articles_data):
        """Append a (ids, titles, content, dates, links) tuple to the store."""
        ids, titles, content, dates, links = articles_data
        self.ids += ids
        self.titles += titles
        self.content += content
        self.dates += dates
        self.links += links

    def get_article(self, a_id):
        """Return the article body at positional index ``a_id``."""
        return self.content[a_id]

    def get_last_time(self):
        """Return the newest stored timestamp (lists are date-sorted), or
        the hard-coded bootstrap epoch 1509031277 when the store is empty."""
        return self.dates[-1] if len(self.dates) > 0 else 1509031277

    def two_days_range(self, id1, id2):
        """True when the articles at positions id1/id2 are < 2 days apart."""
        return abs(self.dates[id1] - self.dates[id2]) < self._TWO_DAYS

    def get_last_two_days(self, a_id):
        """Return positional indices of articles within two days before the
        article whose id is ``a_id`` (inclusive), newest first."""
        begin_with = self.ids.index(a_id)
        ids = []
        # Walk backwards from the anchor until the 2-day window is exceeded;
        # lists are date-sorted, so we can stop at the first miss.
        for i in range(begin_with, -1, -1):
            if self.two_days_range(begin_with, i):
                ids.append(i)
            else:
                break
        return np.array(ids)

    def make_json(self, doc_id, similar_id):
        """Serialize one article id plus its similar-article ids to JSON.

        ``doc_id`` and entries of ``similar_id`` are positional indices.
        """
        return json.dumps({"article_id": self.ids[doc_id],
                           "similar_id": [self.ids[s_id] for s_id in similar_id]},
                          indent=4)

    def get_latest(self, last_id, content_type='titles', filter_bl=True):
        """Return documents that appear after the document with ``last_id``.

        :param last_id: an article id from self.ids, or -1 for "everything"
        :param content_type: 'titles' or 'content' -- which text to return
        :param filter_bl: when True, drop blacklisted sources
        :return: dict with parallel lists 'ids' and 'docs'
        :raises Exception: when ``last_id`` is neither -1 nor a known id
        :raises NotImplementedError: for an unknown ``content_type``
        """
        try:
            last_pos = self.ids.index(last_id)
        except ValueError:  # was a bare except: that masked unrelated errors
            if last_id != -1:
                raise Exception("No document with such id")
            last_pos = last_id
        if content_type == 'titles':
            content_source = self.titles
        elif content_type == 'content':
            content_source = self.content
        else:
            # Was `raise NotImplemented`, which itself raises TypeError
            # because NotImplemented is not an exception class.
            raise NotImplementedError("unknown content_type: %r" % (content_type,))
        latest_ids = []
        latest_content = []
        for i in range(last_pos + 1, len(self.ids)):
            if filter_bl and self.is_blacklisted(i):
                continue
            latest_ids.append(self.ids[i])
            latest_content.append(content_source[i])
        return {'ids': latest_ids, 'docs': latest_content}

    def get_titles(self, last_n=-1):
        """Return the latest non-blacklisted titles.

        :param last_n: number of most recent titles to consider, -1 for all
        :return: dict with parallel lists 'ids' and 'titles'
        """
        titles_total = len(self.titles)
        if last_n == -1:
            titles_range = range(titles_total)
        else:
            titles_range = range(max(titles_total - last_n, 0), titles_total)
        titles_ids = []
        titles_content = []
        for i in titles_range:
            if not self.is_blacklisted(i):
                titles_ids.append(self.ids[i])
                titles_content.append(self.titles[i])
        return {'ids': titles_ids, 'titles': titles_content}

    def is_blacklisted(self, ind: int) -> bool:
        """True when the article's host is on the source blacklist.

        Assumes links look like 'scheme://host/...' -- TODO confirm; links
        without a scheme would raise IndexError here.
        """
        url = self.links[ind].split("/")[2]
        return url in self._BLACKLIST

    @staticmethod
    def load(path):
        """Load a pickled AData instance from ``path``.

        Now a real @staticmethod: previously, calling it on an instance
        passed the instance itself as ``path``. ``AData.load(path)`` keeps
        working exactly as before, and the file handle is now closed.
        """
        with open(path, "rb") as fh:
            return p.load(fh)

    def save(self, path):
        """Pickle this instance to ``path`` (file handle now closed properly)."""
        with open(path, "wb") as fh:
            p.dump(self, fh)
| [
"[email protected]"
] | |
be1d104b2f9883aeb4d68360c1c230337ff776cd | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /scripts/sources/S_EstimateQuantileMixtureStressTest.py | fb9d5a602ba8a987d718463a4796f474687762ad | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,523 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_EstimateQuantileMixtureStressTest [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EstimateQuantileMixtureStressTest&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=e-sta-ssessq-uant-copy-1).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, zeros, var, \
mean
from numpy.random import rand
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, bar, legend, subplots, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
from QuantileMixture import QuantileMixture
# -
# ## Compute error, bias and inefficiency for every estimator and for every DGP within the stress-test set
# +
# define estimators
g_b = lambda X: mean(X, 1, keepdims=True)
g_e = lambda X: np.median(X, 1, keepdims=True)
# generate the scenarios for the time series
t_ = 50
j_ = 10 ** 4
alpha = 0.5
sigma_Y = 0.2
mu_Z = 0
sigma_Z = 0.15
S = arange(0,0.22,0.02) # stress-test set for parameter mu_Y
k_ = len(S)
I = zeros((j_, t_))
er_b = zeros(k_)
er_e = zeros(k_)
bias2_b = zeros(k_)
bias2_e = zeros(k_)
inef2_b = zeros(k_)
inef2_e = zeros(k_)
for k in range(k_):
# compute the true value of the property
mu_Y = S[k]
g_f = QuantileMixture(0.5, alpha, mu_Y, sigma_Y, mu_Z, sigma_Z)
# generate j_ simulations of the time series
P = rand(j_, t_)
for j in range(j_):
I[j,:] = QuantileMixture(P[j, :], alpha, mu_Y, sigma_Y, mu_Z, sigma_Z)
# compute simulations of the estimators
G_b = g_b(I)
G_e = g_e(I)
# compute the losses of the estimators
L_b = (G_b - g_f) ** 2
L_e = (G_e - g_f) ** 2
# compute errors
er_b[k] = mean(L_b)
er_e[k] = mean(L_e)
# compute square bias
bias2_b[k] = (mean((G_b) - g_f)) ** 2
bias2_e[k] = (mean((G_e) - g_f)) ** 2
# compute square inefficiency
inef2_b[k] = var(G_b, ddof=1)
inef2_e[k] = var(G_e, ddof=1)
# -
# ## Compute robust and ensemble errors
# +
er_rob_b = max(er_b)
er_rob_e = max(er_e)
er_ens_b = mean(er_b)
er_ens_e = mean(er_e)
# -
# ## Determine the optimal estimator
# best robust estimator
er_rob = min([er_rob_b, er_rob_e]),
# best ensemble estimator
er_ens = min([er_ens_b, er_ens_e])
# ## plot error, bias and inefficiency for each DGP within the stress-test set
# +
red = [.9, .4, 0]
blue = [0, .45, .7]
f, ax = subplots(2,1)
plt.sca(ax[0])
b = bar(range(1,k_+1),bias2_b.T+inef2_b.T, facecolor= red, label='bias$^2$')
b = bar(range(1,k_+1),inef2_b.T,facecolor= blue,label='ineff$^2$')
plot(range(1,k_+1), er_b, 'k',lw=1.5, label='error')
plt.xticks(range(0,k_+2,2))
legend()
title('stress-test of estimator b')
plt.sca(ax[1])
b = bar(range(1,k_+1),bias2_e.T+inef2_e.T,facecolor= red)
b = bar(range(1,k_+1),inef2_e.T,facecolor= blue)
plot(range(1,k_+1), er_e, 'k',lw= 1.5)
plt.xticks(range(0,k_+2,2))
title('stress-test of estimator e')
plt.tight_layout();
plt.show()
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"[email protected]"
] | |
78143c4e6942051b155a1e0dc181ef0d38715934 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/lib-python/3/test/test_genexps.py | cc75ac26ee667116ef05274e3e3a41516ae62aeb | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 7,149 | py | doctests = """
Test simple loop with conditional
>>> sum(i*i for i in range(100) if i&1 == 1)
166650
Test simple nesting
>>> list((i,j) for i in range(3) for j in range(4) )
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list((i,j) for i in range(4) for j in range(i) )
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum(i*i for i in range(100))
328350
>>> i
20
Test first class
>>> g = (i*i for i in range(4))
>>> type(g)
<class 'generator'>
>>> list(g)
[0, 1, 4, 9]
Test direct calls to next()
>>> g = (i*i for i in range(3))
>>> next(g)
0
>>> next(g)
1
>>> next(g)
4
>>> next(g)
Traceback (most recent call last):
File "<pyshell#21>", line 1, in -toplevel-
next(g)
StopIteration
Does it stay stopped?
>>> next(g)
Traceback (most recent call last):
File "<pyshell#21>", line 1, in -toplevel-
next(g)
StopIteration
>>> list(g)
[]
Test running gen when defining function is out of scope
>>> def f(n):
... return (i*i for i in range(n))
>>> list(f(10))
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> def f(n):
... return ((i,j) for i in range(3) for j in range(n))
>>> list(f(4))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
>>> def f(n):
... return ((i,j) for i in range(3) for j in range(4) if j in range(n))
>>> list(f(4))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
>>> list(f(2))
[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
Verify that parenthesis are required in a statement
>>> def f(n):
... return i*i for i in range(n)
Traceback (most recent call last):
...
SyntaxError: invalid syntax
Verify that parenthesis are required when used as a keyword argument value
>>> dict(a = i for i in range(10))
Traceback (most recent call last):
...
SyntaxError: invalid syntax
Verify that parenthesis are required when used as a keyword argument value
>>> dict(a = (i for i in range(10))) #doctest: +ELLIPSIS
{'a': <generator object <genexpr> at ...>}
Verify early binding for the outermost for-expression
>>> x=10
>>> g = (i*i for i in range(x))
>>> x = 5
>>> list(g)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
Verify that the outermost for-expression makes an immediate check
for iterability
>>> (i for i in 6)
Traceback (most recent call last):
File "<pyshell#4>", line 1, in -toplevel-
(i for i in 6)
TypeError: 'int' object is not iterable
Verify late binding for the outermost if-expression
>>> include = (2,4,6,8)
>>> g = (i*i for i in range(10) if i in include)
>>> include = (1,3,5,7,9)
>>> list(g)
[1, 9, 25, 49, 81]
Verify late binding for the innermost for-expression
>>> g = ((i,j) for i in range(3) for j in range(x))
>>> x = 4
>>> list(g)
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Verify re-use of tuples (a side benefit of using genexps over listcomps)
>>> from test.support import check_impl_detail
>>> tupleids = list(map(id, ((i,i) for i in range(10))))
>>> int(max(tupleids) - min(tupleids)) if check_impl_detail() else 0
0
Verify that syntax error's are raised for genexps used as lvalues
>>> (y for y in (1,2)) = 10
Traceback (most recent call last):
...
SyntaxError: can't assign to generator expression
>>> (y for y in (1,2)) += 10
Traceback (most recent call last):
...
SyntaxError: can't assign to generator expression
########### Tests borrowed from or inspired by test_generators.py ############
Make a generator that acts like range()
>>> yrange = lambda n: (i for i in range(n))
>>> list(yrange(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
>>> list(zrange(5))
[0, 1, 2, 3, 4]
Verify that a gen exp cannot be resumed while it is actively running:
>>> g = (next(me) for i in range(10))
>>> me = g
>>> next(me)
Traceback (most recent call last):
File "<pyshell#30>", line 1, in -toplevel-
next(me)
File "<pyshell#28>", line 1, in <generator expression>
g = (next(me) for i in range(10))
ValueError: generator already executing
Verify exception propagation
>>> g = (10 // i for i in (5, 0, 2))
>>> next(g)
2
>>> next(g)
Traceback (most recent call last):
File "<pyshell#37>", line 1, in -toplevel-
next(g)
File "<pyshell#35>", line 1, in <generator expression>
g = (10 // i for i in (5, 0, 2))
ZeroDivisionError: integer division or modulo by zero
>>> next(g)
Traceback (most recent call last):
File "<pyshell#38>", line 1, in -toplevel-
next(g)
StopIteration
Make sure that None is a valid return value
>>> list(None for i in range(10))
[None, None, None, None, None, None, None, None, None, None]
Check that generator attributes are present
>>> g = (i*i for i in range(3))
>>> expected = set(['gi_frame', 'gi_running'])
>>> set(attr for attr in dir(g) if not attr.startswith('__')) >= expected
True
>>> print(g.__next__.__doc__)
x.__next__() <==> next(x)
>>> import types
>>> isinstance(g, types.GeneratorType)
True
Check the __iter__ slot is defined to return self
>>> iter(g) is g
True
Verify that the running flag is set properly
>>> g = (me.gi_running for i in (0,1))
>>> me = g
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
Verify that genexps are weakly referencable
>>> import weakref
>>> g = (i*i for i in range(4))
>>> wr = weakref.ref(g)
>>> wr() is g
True
>>> p = weakref.proxy(g)
>>> list(p)
[0, 1, 4, 9]
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
    """Run this module's doctests; on refcount-debug builds (when verbose)
    also repeat the run several times to expose reference leaks."""
    import sys
    from test import support
    from test import test_genexps
    support.run_doctest(test_genexps, verbose)
    # verify reference counting -- only possible on --with-pydebug builds,
    # which provide sys.gettotalrefcount
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in range(len(counts)):
            # Re-run the doctests and record the total refcount after each
            # pass; a steadily growing sequence indicates a leak.
            support.run_doctest(test_genexps, verbose)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print(counts)
if __name__ == "__main__":
    test_main(verbose=True)
| [
"[email protected]"
] | |
43d2678fe00adbaa6aeb89d3ac85cee449782bf5 | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/tankerkoenig/binary_sensor.py | 5f10b54f7042763cd7b371c8f9cef7f5b76c43ec | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 2,257 | py | """Tankerkoenig binary sensor integration."""
from __future__ import annotations
import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import TankerkoenigCoordinatorEntity, TankerkoenigDataUpdateCoordinator
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the tankerkoenig binary sensors."""
coordinator: TankerkoenigDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
stations = coordinator.stations.values()
entities = []
for station in stations:
sensor = StationOpenBinarySensorEntity(
station,
coordinator,
coordinator.show_on_map,
)
entities.append(sensor)
_LOGGER.debug("Added sensors %s", entities)
async_add_entities(entities)
class StationOpenBinarySensorEntity(TankerkoenigCoordinatorEntity, BinarySensorEntity):
"""Shows if a station is open or closed."""
_attr_device_class = BinarySensorDeviceClass.DOOR
def __init__(
self,
station: dict,
coordinator: TankerkoenigDataUpdateCoordinator,
show_on_map: bool,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator, station)
self._station_id = station["id"]
self._attr_name = (
f"{station['brand']} {station['street']} {station['houseNumber']} status"
)
self._attr_unique_id = f"{station['id']}_status"
if show_on_map:
self._attr_extra_state_attributes = {
ATTR_LATITUDE: station["lat"],
ATTR_LONGITUDE: station["lng"],
}
@property
def is_on(self) -> bool | None:
"""Return true if the station is open."""
data: dict = self.coordinator.data[self._station_id]
return data is not None and data.get("status") == "open"
| [
"[email protected]"
] | |
94c88e893fab70eb22becd4d8470f07518bbf6a5 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py | ea1ba9e9b7e48392782524321a3dcf960ee5d629 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:831ddc7c07f1bc0699f90cfd23a91ebe38264e16cd0c35dcf82dab49654d5e00
size 1601
| [
"[email protected]"
] | |
d00f8a5113df64077c306d43ae28a8fd05eda42a | ae4e517aebe74a851df977af1a11d2a67120050c | /h2o-py/tests/testdir_munging/unop/pyunit_expr_math_ops.py | 970952c2b4e4d21a1f40dda8da7beca2cf42bea5 | [
"Apache-2.0"
] | permissive | StephaneFeniar/h2o-dev | 8dd06549ddee490d6db5b7dd41f043e061cee121 | 2c0c69aeda69d08be5edce330bf34898e9b2ab2b | refs/heads/master | 2021-01-14T08:51:40.694426 | 2015-04-18T21:01:23 | 2015-04-18T21:01:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,544 | py | import sys
sys.path.insert(1, "../../../")
import h2o
import numpy as np
import random
import math
import scipy.special
def expr_math_ops(ip,port):
# Connect to h2o
h2o.init(ip,port)
sin_cos_tan_atan_sinh_cosh_tanh_asinh_data = [[random.uniform(-10,10) for r in range(10)] for c in range(10)]
asin_acos_atanh_data = [[random.uniform(-1,1) for r in range(10)] for c in range(10)]
acosh_data = [[random.uniform(1,10) for r in range(10)] for c in range(10)]
abs_data = [[random.uniform(-100000,0) for r in range(10)] for c in range(10)]
h2o_data1 = h2o.H2OFrame(python_obj=sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)
h2o_data2 = h2o.H2OFrame(python_obj=asin_acos_atanh_data)
h2o_data3 = h2o.H2OFrame(python_obj=acosh_data)
h2o_data4 = h2o.H2OFrame(python_obj=abs_data)
np_data1 = np.array(sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)
np_data2 = np.array(asin_acos_atanh_data)
np_data3 = np.array(acosh_data)
np_data4 = np.array(abs_data)
row, col = h2o_data1.dim()
def check_values(h2o_data, numpy_data):
success = True
for i in range(10):
r = random.randint(0,row-1)
c = random.randint(0,col-1)
h2o_val = h2o.as_list(h2o_data[r,c])[0][0]
num_val = numpy_data[r,c]
if not abs(h2o_val - num_val) < 1e-06:
success = False
print "check unsuccessful! h2o computed {0} and numpy computed {1}".format(h2o_val,num_val)
return success
h2o_data1 = h2o_data1 + 2
h2o_data2 = h2o_data2 / 1.01
h2o_data3 = h2o_data3 * 1.5
h2o_data4 = h2o_data4 - 1.5
np_data1 = np_data1 + 2
np_data2 = np_data2 / 1.01
np_data3 = np_data3 * 1.5
np_data4 = np_data4 - 1.5
assert check_values(h2o.cos(h2o_data1), np.cos(np_data1)), "expected equal cos values between h2o and numpy"
assert check_values(h2o.sin(h2o_data1), np.sin(np_data1)), "expected equal sin values between h2o and numpy"
assert check_values(h2o.tan(h2o_data1), np.tan(np_data1)), "expected equal tan values between h2o and numpy"
assert check_values(h2o.acos(h2o_data2), np.arccos(np_data2)), "expected equal acos values between h2o and numpy"
assert check_values(h2o.asin(h2o_data2), np.arcsin(np_data2)), "expected equal asin values between h2o and numpy"
assert check_values(h2o.atan(h2o_data1), np.arctan(np_data1)), "expected equal atan values between h2o and numpy"
assert check_values(h2o.cosh(h2o_data1), np.cosh(np_data1)), "expected equal cosh values between h2o and numpy"
assert check_values(h2o.sinh(h2o_data1), np.sinh(np_data1)), "expected equal sinh values between h2o and numpy"
assert check_values(h2o.tanh(h2o_data1), np.tanh(np_data1)), "expected equal tanh values between h2o and numpy"
assert check_values(h2o.acosh(h2o_data3), np.arccosh(np_data3)), "expected equal acosh values between h2o and numpy"
assert check_values(h2o.asinh(h2o_data1), np.arcsinh(np_data1)), "expected equal asinh values between h2o and numpy"
assert check_values(h2o.atanh(h2o_data2), np.arctanh(np_data2)), "expected equal atanh values between h2o and numpy"
assert check_values(h2o.cospi(h2o_data2/math.pi), np.cos(np_data2)), "expected equal cospi values between h2o and numpy"
assert check_values(h2o.sinpi(h2o_data2/math.pi), np.sin(np_data2)), "expected equal sinpi values between h2o and numpy"
assert check_values(h2o.tanpi(h2o_data2/math.pi), np.tan(np_data2)), "expected equal tanpi values between h2o and numpy"
assert check_values(h2o.abs(h2o_data4), np.fabs(np_data4)), "expected equal abs values between h2o and numpy"
assert check_values(h2o.sign(h2o_data2), np.sign(np_data2)), "expected equal sign values between h2o and numpy"
assert check_values(h2o.sqrt(h2o_data3), np.sqrt(np_data3)), "expected equal sqrt values between h2o and numpy"
assert check_values(h2o.trunc(h2o_data3), np.trunc(np_data3)), "expected equal trunc values between h2o and numpy"
assert check_values(h2o.ceil(h2o_data3), np.ceil(np_data3)), "expected equal ceil values between h2o and numpy"
assert check_values(h2o.floor(h2o_data3), np.floor(np_data3)), "expected equal floor values between h2o and numpy"
assert check_values(h2o.log(h2o_data3), np.log(np_data3)), "expected equal log values between h2o and numpy"
assert check_values(h2o.log10(h2o_data3), np.log10(np_data3)), "expected equal log10 values between h2o and numpy"
assert check_values(h2o.log1p(h2o_data3), np.log1p(np_data3)), "expected equal log1p values between h2o and numpy"
assert check_values(h2o.log2(h2o_data3), np.log2(np_data3)), "expected equal log2 values between h2o and numpy"
assert check_values(h2o.exp(h2o_data3), np.exp(np_data3)), "expected equal exp values between h2o and numpy"
assert check_values(h2o.expm1(h2o_data3), np.expm1(np_data3)), "expected equal expm1 values between h2o and numpy"
h2o_val = h2o.as_list(h2o.gamma(h2o_data3))[5][5]
num_val = math.gamma(h2o.as_list(h2o_data3)[5][5])
assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
"check unsuccessful! h2o computed {0} and math computed {1}. expected equal gamma values between h2o and math".format(h2o_val,num_val)
h2o_val = h2o.as_list(h2o.lgamma(h2o_data3))[5][5]
num_val = math.lgamma(h2o.as_list(h2o_data3)[5][5])
assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
"check unsuccessful! h2o computed {0} and math computed {1}. expected equal lgamma values between h2o and math".format(h2o_val,num_val)
h2o_val = h2o.as_list(h2o.digamma(h2o_data3))[5][5]
num_val = scipy.special.polygamma(0,h2o.as_list(h2o_data3)[5][5])
assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
"check unsuccessful! h2o computed {0} and math computed {1}. expected equal digamma values between h2o and math".format(h2o_val,num_val)
h2o_val = h2o.as_list(h2o.trigamma(h2o_data3))[5][5]
num_val = scipy.special.polygamma(1,h2o.as_list(h2o_data3)[5][5])
assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
"check unsuccessful! h2o computed {0} and math computed {1}. expected equal trigamma values between h2o and math".format(h2o_val,num_val)
if __name__ == "__main__":
h2o.run_test(sys.argv, expr_math_ops)
| [
"[email protected]"
] | |
54784ae241ebb27af2105733d27895990c63c635 | d024ccbb4cc04af3866a4db1ac1d8c1d7395d909 | /boj/4673.py | 28d025abbfa54b5cb36be7af6190215810610b63 | [] | no_license | demetoir/ps-solved-code | ff0418dddd10f3b053c9b8d32af48027b10c8481 | f4d4fd2183176b083f2287c9d89c6d5a1e983cc5 | refs/heads/master | 2022-10-14T20:11:34.581439 | 2020-06-12T11:24:11 | 2020-06-12T11:24:11 | 68,782,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | l=lambda n:n%10+l(n//10) if n>0 else 0
a=[1]*20002
for i in range(1,10000):a[l(i)+i]=0
for i in range(1,10000):
if a[i]==1:print(i) | [
"[email protected]"
] | |
5df196843c25b81138c44c75987e86e0af7debc1 | 3dfa65c42241d866dcf82d2f6faf603e5aec096c | /gladweb/views/index.py | 340e3f77f04331b417dcf26cd0303c55a194ac4b | [] | no_license | Dav1dde/glad-web | 0ad5f11f4ca0966ae29b4c1972a02295bdd6c47c | ff05bd08efca97c2f40fbf3e9f8fde265b7c8e7d | refs/heads/master | 2023-03-07T18:31:52.638325 | 2023-02-25T16:14:12 | 2023-02-25T16:14:12 | 35,337,528 | 396 | 74 | null | null | null | null | UTF-8 | Python | false | false | 4,874 | py | import json
import os
import sys
import tempfile
import zipfile
from collections import namedtuple
from flask import Blueprint, request, render_template, g, url_for, redirect, flash, current_app
import glad.lang.c.generator
from glad.spec import SPECS
from gladweb.views.exception import InvalidUserInput
if sys.version_info >= (3, 0):
from itertools import zip_longest, chain
from urllib.parse import urlencode
else:
from itertools import izip_longest as zip_longest, chain
from urllib import urlencode
Version = namedtuple('Version', ['major', 'minor'])
index = Blueprint('index', __name__)
@index.route('/', methods=['GET'])
def landing():
return render_template(
'index.html', **g.metadata.as_dict()
)
def validate_form():
language = request.form.get('language')
specification = request.form.get('specification')
profile = request.form.get('profile', 'compatibility')
apis = request.form.getlist('api')
extensions = request.form.getlist('extensions')
loader = request.form.get('loader') is not None
omitkhr = request.form.get('omitkhr') is not None
local_files = request.form.get('localfiles') is not None
messages = list()
if language not in (l.id for l in g.metadata.languages):
raise InvalidUserInput('Invalid language "{0}"'.format(language))
if specification not in (s.id for s in g.metadata.specifications):
raise InvalidUserInput('Invalid specification "{0}"'.format(specification))
if profile not in (p.id for p in g.metadata.profiles):
raise InvalidUserInput('Invalid profile "{0}"'.format(profile))
apis_parsed = dict()
for api in apis:
name, version = api.split('=')
if version == 'none':
continue
apis_parsed[name] = Version(*map(int, version.split('.')))
if len(apis_parsed) == 0:
raise InvalidUserInput(
'No API for specification selected'.format(specification)
)
return messages, language, specification, profile, apis_parsed, extensions, loader, omitkhr, local_files
def write_dir_to_zipfile(path, zipf, exclude=None):
if exclude is None:
exclude = []
for root, dirs, files in os.walk(path):
for file_ in files:
if file_ in exclude:
continue
zipf.write(
os.path.join(root, file_),
os.path.relpath(os.path.join(root, file_), path)
)
def glad_generate():
# this is really getting ugly, where did my code quality standards go?
messages, language, specification, profile, apis, extensions, loader_enabled, omitkhr, local_files = validate_form()
cls = SPECS[specification]
spec = cls.fromstring(g.cache.open_specification(specification).read())
if spec.NAME == 'gl':
spec.profile = profile
generator_cls, loader_cls = glad.lang.get_generator(
language, spec.NAME.lower()
)
if loader_cls is None:
raise InvalidUserInput('API/Spec not yet supported')
loader = loader_cls(apis)
loader.disabled = not loader_enabled
loader.local_files = local_files
glad.lang.c.generator.KHRPLATFORM = 'file:' + g.cache.get_khrplatform()
# the suffix is required because mkdtemp sometimes creates directories with an
# underscore at the end, we later use werkzeug.utils.secure_filename on that directory,
# this function happens to strip underscores...
directory = tempfile.mkdtemp(dir=current_app.config['TEMP'], suffix='glad')
os.chmod(directory, 0o750)
with generator_cls(directory, spec, apis, extensions, loader, local_files=local_files, omit_khrplatform=omitkhr) as generator:
generator.generate()
zip_path = os.path.join(directory, 'glad.zip')
with open(zip_path, 'wb') as fobj:
zipf = zipfile.ZipFile(fobj, mode='w')
write_dir_to_zipfile(directory, zipf, exclude=['glad.zip'])
zipf.close()
serialized = urlencode(list(chain.from_iterable(
zip_longest('', x[1], fillvalue=x[0]) for x in request.form.lists())
))
serialized_path = os.path.join(directory, '.serialized')
with open(serialized_path, 'w') as fobj:
json.dump({'params': serialized, 'messages': messages}, fobj)
name = os.path.split(directory)[1]
if current_app.config['FREEZE']:
current_app.freezer.freeze(name)
return url_for('generated.autoindex', root=name)
@index.route('/generate', methods=['POST'])
def generate():
try:
url = glad_generate()
except Exception as e:
import gladweb
if gladweb.sentry is not None:
gladweb.sentry.captureException()
current_app.logger.exception(e)
current_app.logger.error(request.form)
flash(str(e), category='error')
return redirect(url_for('index.landing'))
return redirect(url)
| [
"[email protected]"
] | |
e44e9989565a9d1ffcbc9142748500ff5a274785 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201022114431.py | 652f1b7cfbfba3d9c2325e1ea062cb799ef42b97 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class TitleBlock(blocks.StructBlock):
text = blocks.CharBlock(
required = True,
elp_text='Tekst do wyświetlenia',
)
class Meta:
template = 'streams/title_block.html'
icon = 'edycja'
label = 'Tytuł'
help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class LinkValue(blocks.StructValue):
"""Dodatkowao logika dla lików"""
def url(self):
interlan_page = self.get('internal_page')
external_l
class Link(blocks.StructBlock):
link_text = blocks.CharBlock(
max_length=50,
default='Więcej szczegółów'
)
interal_page = blocks.PageChooserBlock(
required=False
)
external_link = blocks.URLBlock(
required=False
)
class Meta:
value_class = LinkValue
class Card(blocks.StructBlock):
title = blocks.CharBlock(
max_length=100,
help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.'
)
text = blocks.TextBlock(
max_length=255,
help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.'
)
image = ImageChooserBlock(
help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli'
)
link = Link(help_text = 'Wwybierz link')
class CardsBlock(blocks.StructBlock):
cards = blocks.ListBlock(
Card()
)
class Meta:
template = 'streams/card_block.html'
icon = 'image'
label = 'Karty standardowe'
| [
"[email protected]"
] | |
566b949d5b6105ffa0ac3812e25ae751a59de219 | fdd67d3733d3db2fb381f25b0985952e3f7c9a4f | /epdAlarm.py | 9fc35afaa5fac12a5bf4482d115b6d71392bd049 | [] | no_license | star-controls/epdAlarm | 56c6ef50616ea4290217b41d0daf3c4ebf7ee952 | 4a966e38116344b9d209dd8efc9abfbbc0e4db5a | refs/heads/master | 2020-03-21T07:51:27.655488 | 2019-03-05T15:04:12 | 2019-03-05T15:04:12 | 138,303,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,072 | py |
from epdchan import epdchan
import paho.mqtt.client as mqtt
from watchdog import watchdog
from softioc import builder
import time
import pandas as pd
#EPD PVs
builder.SetDeviceName('EPD')
#list of all EPD channels as 3-index list
npp = 12
ntile = 31
elist = []
#east/west loop
for ew in range(0,2):
elist.append([])
#PP loop
for ipp in range(0,npp+1):
elist[ew].append([])
#tile loop
for itile in range(ntile+1):
#PP starts at 1, handled in epdchan constructor
elist[ew][ipp].append( epdchan(ew, ipp, itile) )
#watchdog timer for 60 seconds
wdt = watchdog(60, elist)
#file holding alarm limit values
csvlim = "limits.csv"
lframe = pd.read_csv(csvlim)
#set initial alarm values
elist[0][0][0].limits.imon_max = lframe['imon_max'][0]
elist[0][0][0].limits.rmon_min = lframe['rmon_min'][0]
elist[0][0][0].limits.rmon_max = lframe['rmon_max'][0]
elist[0][0][0].limits.temp_max = lframe['temp_max'][0]
#functions to show alarm limits
#_____________________________________________________________________________
def get_imon_max():
return elist[0][0][0].limits.imon_max
#_____________________________________________________________________________
def get_rmon_min():
return elist[0][0][0].limits.rmon_min
#_____________________________________________________________________________
def get_rmon_max():
return elist[0][0][0].limits.rmon_max
#_____________________________________________________________________________
def get_temp_max():
return elist[0][0][0].limits.temp_max
#_____________________________________________________________________________
def put_limit(key, val):
#put limit value to file
lframe[key][0] = val
lframe.to_csv(csvlim, index=False)
#PVs to set alarm limits
#_____________________________________________________________________________
def set_imon_max(val):
elist[0][0][0].limits.imon_max = val
put_limit('imon_max', val)
imon_max_pv = builder.aOut("imon_max", on_update=set_imon_max, initial_value=get_imon_max(), PREC=2)
#_____________________________________________________________________________
def set_rmon_min(val):
elist[0][0][0].limits.rmon_min = val
put_limit('rmon_min', val)
rmon_min_pv = builder.aOut("rmon_min", on_update=set_rmon_min, initial_value=get_rmon_min(), PREC=1)
#_____________________________________________________________________________
def set_rmon_max(val):
elist[0][0][0].limits.rmon_max = val
put_limit('rmon_max', val)
rmon_max_pv = builder.aOut("rmon_max", on_update=set_rmon_max, initial_value=get_rmon_max(), PREC=1)
#_____________________________________________________________________________
def set_temp_max(val):
elist[0][0][0].limits.temp_max = val
put_limit('temp_max', val)
temp_max_pv = builder.aOut("temp_max", on_update=set_temp_max, initial_value=get_temp_max(), PREC=1)
#_____________________________________________________________________________
def init_alarm_limits():
#put initial values to alarm limits PVs
#imon_max_pv.set(get_imon_max())
#rmon_min_pv.set(get_rmon_min())
#rmon_max_pv.set(get_rmon_max())
#temp_max_pv.set(get_temp_max())
pass
#functions for mqtt message
#_____________________________________________________________________________
def get_msg_id(msg, idnam):
#get message id
return ( msg[msg.find(idnam):] ).split('"')[2]
#_____________________________________________________________________________
def process_msg(msg):
#parse the message, get the values, put them to EPD channel objects
#check message validity
if get_msg_id(msg, "dcs_id") != "epd_controller" or get_msg_id(msg, "dcs_uid") != "tonko":
return
wdt.reset()
#message header
hstart = msg.find("[", msg.find("dcs_header")) + 1
hend = msg.find("]")
hlist = msg[hstart:hend].split(",")
id_ew = hlist.index('"fps_quad"')
id_pp = hlist.index('"fps_layer"')
id_tile = hlist.index('"fps_channel"')
id_vslope = hlist.index('"vslope"')
id_vcomp = hlist.index('"temp"')
id_imon = hlist.index('"imon0"')
id_rmon = hlist.index('"rmon0"')
id_state = hlist.index('"state"')
#get values table
vstart = msg.find("{", msg.find("dcs_values")) + 1
vend = msg.find("}", vstart)
vtab = msg[vstart:vend].split("]")
#table lines loop
for i in range(len(vtab)):
if vtab[i] == "":
continue
#list of values
vlist = vtab[i][vtab[i].find("[")+1:].split(",")
#EPD indices
ew = int(vlist[id_ew])
pp = int(vlist[id_pp])
tile = int(vlist[id_tile])
#print repr(ew), repr(pp), repr(tile)
#voltage and current values
epd = elist[ew][pp][tile]
epd.vslope = float(vlist[id_vslope])
epd.vcomp = float(vlist[id_vcomp])
epd.imon = float(vlist[id_imon])
epd.rmon = float(vlist[id_rmon])
epd.state = str(vlist[id_state]).lower().strip('"')
#print repr(epd.ew), repr(epd.pp), repr(epd.tile), repr(epd.vslope), repr(epd.vcomp), repr(epd.imon), repr(epd.rmon)
#put values to PVs in EPD object
epd.pvput()
#mqtt client functions
#_____________________________________________________________________________
def on_connect(client, userdata, flags, rc):
# The callback for when the client receives a CONNACK response from the server.
print("MQTT connected with result code "+str(rc))
client.subscribe("dcs/set/Control/epd/epd_control_fee")
#_____________________________________________________________________________
def on_message(client, userdata, msg):
# The callback for when a PUBLISH message is received from the server.
process_msg(msg.payload)
#_____________________________________________________________________________
def read_mqtt():
#initialize alarm limits PVs
init_alarm_limits()
#main mqtt loop
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("mq01.starp.bnl.gov")
client.loop_start()
wdt.start()
#watchdog test, 10 sec timeout
#time.sleep(10)
#client.loop_stop()
#print "alarm on 0, 1, 0"
#elist[0][1][0].set_invalid()
#time.sleep(20)
#print "running again"
#client.loop_start()
| [
"[email protected]"
] | |
a06b4cdb26e979978b7442a5953e6661148f9c4d | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /scripts/make_contract_tests.py | c5a713b158970664e7323b7f9745d351a8a8b188 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 5,441 | py | from os.path import join, dirname, exists
from os import mkdir
import json
from conseil import conseil
from conseil.api import ConseilApi
from pytezos import pytezos
from tests import relpath
from tests.templates import michelson_coding_test_case, micheline_coding_test_case, \
test_michelson_parse,test_michelson_format, test_michelson_inverse, test_micheline_inverse
data_dir = join(dirname(dirname(__file__)), 'tests/contracts')
Account = conseil.tezos.babylonnet.accounts
Operation = conseil.tezos.babylonnet.operations
def get_accounts(limit=1):
operations = Operation.query(Operation.destination,
Operation.operation_group_hash.count()) \
.filter(Operation.destination.startswith('KT1'),
Operation.parameters.isnot(None),
Operation.parameters.notlike('Unparsable'),
Operation.kind == 'transaction',
Operation.status == 'applied') \
.order_by(Operation.operation_group_hash.count().desc()) \
.limit(limit) \
.all()
addresses = list(map(lambda x: x['destination'], operations))
accounts = Account.query(Account.account_id, Account.script, Account.storage) \
.filter(Account.account_id.in_(*addresses),
Account.storage.notlike('Unparsable'),
Account.script.notlike('Unparsable')) \
.all()
return accounts
def get_operations(account_id, limit=1):
operations = Operation.query(Operation.block_level.max().label('level'),
Operation.parameters) \
.filter(Operation.destination == account_id,
Operation.parameters.isnot(None),
Operation.parameters.notlike('Unparsable'),
Operation.kind == 'transaction',
Operation.status == 'applied',
Operation.internal.is_(False)) \
.limit(limit) \
.all()
return operations
def find_operation(block_level, destination):
opg_list = pytezos.shell.blocks[block_level].operations.managers()
for opg in opg_list:
for content in opg['contents']:
if content.get('parameters') and content['destination'] == destination:
return content['parameters'], opg['hash']
assert False
def make_package(account, operations=1):
account_dir = join(data_dir, account["account_id"])
if exists(account_dir):
return
else:
mkdir(account_dir)
files = {
'dir': account_dir,
'name': account['account_id'][:6],
'code': [],
'storage': [],
'parameter': []
}
def write_files(michelson, micheline, section, name):
tz_path = join(account_dir, f'{section}_{name}.tz')
json_path = join(account_dir, f'{section}_{name}.json')
with open(tz_path, 'w+') as f:
f.write(michelson)
with open(json_path, 'w+') as f:
f.write(json.dumps(micheline, indent=2))
files[section].append((name, tz_path, json_path))
contract = pytezos.shell.contracts[account['account_id']]()
write_files(
michelson=account['script'],
micheline=contract['script']['code'],
section='code',
name=account['account_id'][:6]
)
write_files(
michelson=account['storage'],
micheline=contract['script']['storage'],
section='storage',
name=account['account_id'][:6]
)
operations = get_operations(account['account_id'], limit=operations)
for operation in operations:
parameters, opg_hash = find_operation(operation['level'], account['account_id'])
write_files(
michelson=operation['parameters'],
micheline=parameters,
section='parameter',
name=opg_hash[:6]
)
return files
def make_michelson_tests(files: dict):
test_case = [
michelson_coding_test_case.format(case=files['name'])
]
for section in ['code', 'storage', 'parameter']:
for name, tz_path, json_path in files[section]:
case = f'{section}_{name}'
test_case.extend([
test_michelson_parse.format(case=case, json_path=relpath(json_path), tz_path=relpath(tz_path)),
test_michelson_format.format(case=case, json_path=relpath(json_path), tz_path=relpath(tz_path)),
test_michelson_inverse.format(case=case, json_path=relpath(json_path))
])
with open(join(files['dir'], f'test_michelson_coding_{files["name"]}.py'), 'w+') as f:
f.write(''.join(test_case))
def make_micheline_tests(files: dict):
test_case = [
micheline_coding_test_case.format(case=files['name'], json_path=relpath(files['code'][0][2]))
]
for section in ['storage', 'parameter']:
for name, tz_path, json_path in files[section]:
case = f'{section}_{name}'
test_case.append(
test_micheline_inverse.format(case=case, json_path=relpath(json_path), section=section)
)
with open(join(files['dir'], f'test_micheline_coding_{files["name"]}.py'), 'w+') as f:
f.write(''.join(test_case))
if __name__ == '__main__':
accounts = get_accounts(limit=100)
for acc in accounts:
package = make_package(acc, operations=7)
if package:
make_michelson_tests(package)
make_micheline_tests(package)
| [
"[email protected]"
] | |
a3b4529f2a8af100e1863c8d7f61d0522f76b1ce | a46646a707b9d747fcf29a86f67a4ccbcbd0ddb9 | /week10/book/76prayme.py | 9a15021619c3da6599d23328531d5d56030c674c | [] | no_license | DevooKim/algorithm-study | 5720642bb43ea364dae924ee038f97379f2ef85b | 830b148defc7f0097abe2f5d3f4e9d8f3333efb0 | refs/heads/main | 2023-02-23T18:40:28.978111 | 2021-01-28T12:09:06 | 2021-01-28T12:09:06 | 302,206,505 | 2 | 1 | null | 2021-01-28T12:09:07 | 2020-10-08T01:54:08 | Python | UTF-8 | Python | false | false | 2,155 | py | import collections
import heapq
import functools
import itertools
import re
import sys
import math
import bisect
from typing import List
class Solution:
def minWindow(self, s: str, t: str) -> str:
# T의 크기부터 점점 키워가기
def contains(s_substr_lst: List, t_lst: List):
for t_elem in t_lst:
if t_elem in s_substr_lst:
s_substr_lst.remove(t_elem)
else:
return False
return True
if not s or not t:
return ''
window_size = len(t)
for size in range(window_size, len(s) + 1):
for left in range(len(s) - size + 1):
s_substr = s[left:left+size]
if contains(list(s_substr), list(t)):
return s_substr
return ''
def two_pointer_with_window(self, s: str, t: str) -> str:
need = collections.Counter(t)
missing = len(t)
left = start = end = 0
for right, char in enumerate(s, 1):
missing -= need[char] > 0
need[char] -= 1
if missing == 0:
while left < right and need[s[left]] < 0:
need[s[left]] += 1
left += 1
if not end or right - left <= end - start:
start, end = left, right
need[s[left]] += 1
missing += 1
left += 1
return s[start:end]
def boo_counter(self, s: str, t: str) -> str:
t_count = collections.Counter(t)
current_count = collections.Counter()
start = float('-inf')
end = float('inf')
left = 0
for right, char in enumerate(s, 1):
current_count[char] += 1
while current_count & t_count == t_count:
if right - left < end - start:
start,end = left, right
current_count[s[left]] -= 1
left += 1
return s[start:end] if end-start <= len(s) else ''
print(Solution().minWindow("ADOBECODEBANC", "ABC")) # "BANC
print(Solution().minWindow("a", "a")) | [
"[email protected]"
] | |
ba739e1e9487460532edf7325747f1c35b66b048 | 1e9ad304868c2bda918c19eba3d7b122bac3923b | /kubernetes/client/models/v1beta1_http_ingress_rule_value.py | 168b201cfa6cc6450e6154e0ffdd4d11d9e0805c | [
"Apache-2.0"
] | permissive | pineking/client-python | c77e5bd3d476ac852e6dffa96056008baa0f597f | 74a64d7325518f4298600d4bb300f92843c29347 | refs/heads/master | 2021-01-22T22:16:27.368406 | 2017-03-15T08:21:21 | 2017-03-15T08:21:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1HTTPIngressRuleValue(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, paths=None):
"""
V1beta1HTTPIngressRuleValue - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'paths': 'list[V1beta1HTTPIngressPath]'
}
self.attribute_map = {
'paths': 'paths'
}
self._paths = paths
@property
def paths(self):
"""
Gets the paths of this V1beta1HTTPIngressRuleValue.
A collection of paths that map requests to backends.
:return: The paths of this V1beta1HTTPIngressRuleValue.
:rtype: list[V1beta1HTTPIngressPath]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""
Sets the paths of this V1beta1HTTPIngressRuleValue.
A collection of paths that map requests to backends.
:param paths: The paths of this V1beta1HTTPIngressRuleValue.
:type: list[V1beta1HTTPIngressPath]
"""
if paths is None:
raise ValueError("Invalid value for `paths`, must not be `None`")
self._paths = paths
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
16b6d2a2bb371aec5835e7f3d24bccfd8b4ec178 | b005d794cfd8e3b063b08d6a266b1e07f0f0f5e9 | /src/webapp/geolist/forms.py | 37418890337878c9eab7f4a4c60577f54493ef96 | [] | no_license | GeoRemindMe/GeoRemindMe_Web | 593c957faa5babb3040da86d94a5d884ad4b2db3 | d441693eedb32c36fe853895110df808a9959941 | refs/heads/master | 2021-01-16T18:29:39.633445 | 2011-11-05T23:50:37 | 2011-11-05T23:50:37 | 1,841,418 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | # coding=utf-8
from django import forms
from django.utils.translation import gettext_lazy as _
from georemindme.models_utils import VISIBILITY_CHOICES
class ListRequestedForm(forms.Form):
name = forms.CharField(required=True)
description = forms.CharField(required=False,widget=forms.Textarea())
visibility = forms.ChoiceField(required=True, choices=VISIBILITY_CHOICES)
# only save if it is valid
def save(self, **kwargs):
from geouser.models import User
if not isinstance(kwargs['user'], User):
raise TypeError
from models import ListRequested
if kwargs['id'] is None:
list = ListRequested.insert_list(user=kwargs['user'],
name=self.cleaned_data['name'],
description=self.cleaned_data['description']
) | [
"[email protected]"
] | |
e564cbb6e5bd4a5146b48e57490b98887aa49bcc | bde6ed092b7b29703737e11c5a5ff90934af3d74 | /AtCoder/tkppc/c.py | c001eb99b71d1c90cfe2d44eb70b9b13d6f44518 | [] | no_license | takecian/ProgrammingStudyLog | 2ab7ea601e0996b3fa502b81ec141bc3772442b6 | 94485d131c0cc9842f1f4799da2d861dbf09b12a | refs/heads/master | 2023-04-28T16:56:18.943574 | 2023-04-18T06:34:58 | 2023-04-18T06:34:58 | 128,525,713 | 4 | 0 | null | 2022-12-09T06:15:19 | 2018-04-07T12:21:29 | Python | UTF-8 | Python | false | false | 321 | py | # https://tkppc.contest.atcoder.jp/tasks/tkppc2015_c
N, M = map(int, input().split())
S = int(input())
T = [0 for _ in range(10000)]
for _ in range(N):
t, k = map(int, input().split())
T[t-1] = k
# print(T)
total = 0
d = 0
for i in range(S - 1):
total += T[i]
if total >= M:
d += 1
print(d)
| [
"[email protected]"
] | |
4ab92065962d53964ce2f930d220837337ee3eac | c318bd15c40063639edc95bb8419f4c0f4a2b54f | /update_s3_configuration.py | e520e1541db8ce977a5e0513f0439b48d7e25a29 | [
"MIT"
] | permissive | cwestleyj/HearthstoneJSON | 716fa1b05782d311a04c16c5917ad6e6ae15749a | ed30c943983a4ee0da3a80562655d5a274faad39 | refs/heads/master | 2021-01-19T10:36:59.554294 | 2017-02-10T16:29:13 | 2017-02-10T16:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | #!/usr/bin/env python
import sys
import boto3
from pprint import pprint
API_BUCKET = "api.hearthstonejson.com"
ART_BUCKET = "art.hearthstonejson.com"
def update_website_configuration(s3, build, bucket=API_BUCKET):
print("Querying website configuration for %r" % (bucket))
orig_config = s3.get_bucket_website(Bucket=bucket)
pprint(orig_config)
if "ResponseMetadata" in orig_config:
del orig_config["ResponseMetadata"]
config = orig_config.copy()
config["RoutingRules"] = [{
"Condition": {
"KeyPrefixEquals": "v1/latest/"
},
"Redirect": {
"ReplaceKeyPrefixWith": "v1/%i/" % (build),
"HttpRedirectCode": "302",
"Protocol": "https",
},
}]
if config != orig_config:
print("Updating website configuration")
pprint(config)
s3.put_bucket_website(Bucket=bucket, WebsiteConfiguration=config)
else:
print("Website configuration up-to-date")
def update_art_404_redirects(s3, bucket=ART_BUCKET):
orig_config = s3.get_bucket_website(Bucket=bucket)
if "ResponseMetadata" in orig_config:
del orig_config["ResponseMetadata"]
config = orig_config.copy()
prefixes = [
("v1/orig/", "png", "XXX_001"),
("v1/tiles/", "png", "HERO_01"),
("v1/256x/", "jpg", "XXX_001"),
("v1/512x/", "jpg", "XXX_001"),
]
config["RoutingRules"] = []
for prefix, ext, fallback in prefixes:
config["RoutingRules"].append({
"Condition": {
"HttpErrorCodeReturnedEquals": "404",
"KeyPrefixEquals": prefix,
},
"Redirect": {
"ReplaceKeyWith": prefix + "%s.%s" % (fallback, ext),
"HttpRedirectCode": "302",
"Protocol": "https",
}
})
if config != orig_config:
print("Updating 404 redirects")
pprint(config)
s3.put_bucket_website(Bucket=bucket, WebsiteConfiguration=config)
else:
print("404 redirects up-to-date")
def main():
build = int(sys.argv[1])
s3 = boto3.client("s3")
update_website_configuration(s3, build)
update_art_404_redirects(s3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
731800828469aa1b78563d3dae74e8f7ed296abf | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/point/president/way/kerberos_place_part.py | a02fe413ce8add5d86a79fbfa0f688adb60943f7 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | using System;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;
using Microsoft.Translator.API;
namespace CSharp_TranslateSample
{
public class Program
{
public static string traducida;
public static void Main(string[] args)
{
//TranslateAsync().Wait();
//Console.ReadKey();
}
public static void iniciar() {
TranslateAsync().Wait();
Console.ReadKey();
}
/// Demonstrates getting an access token and using the token to translate.
private static async Task TranslateAsync()
{
var translatorService = new TranslatorService.LanguageServiceClient();
var authTokenSource = new AzureAuthToken(SubscriptionKey);
var token = string.Empty;
try
{
token = await authTokenSource.GetAccessTokenAsync();
}
catch (HttpRequestException)
{
switch (authTokenSource.RequestStatusCode)
{
case HttpStatusCode.Unauthorized:
Console.WriteLine("Request to token service is not authorized (401). Check that the Azure subscription key is valid.");
break;
case HttpStatusCode.Forbidden:
Console.WriteLine("Request to token service is not authorized (403). For accounts in the free-tier, check that the account quota is not exceeded.");
break;
}
throw;
}
traducida = translatorService.Translate(token, "Hello World", "en", "fr", "text/plain", "general", string.Empty);
private const string SubscriptionKey = "a82656b8f3060cebdca7483b1bf557d2"; //Enter here the Key from your Microsoft Translator Text subscription on http://portal.azure.com
//Console.WriteLine("Translated to French: {0}", translatorService.Translate(token, "Hello World", "en", "fr", "text/plain", "general", string.Empty));
}
}
}
| [
"[email protected]"
] | |
b5d7d906ca1b952f86510b73dd4b2ab3e980c6db | 283815445952a37e0124801b456844774355733f | /app/models/__init__.py | ffca30aca69e6931817c66d4933a609c5d6bf330 | [] | no_license | paulosjd/woodrecs | 7aa4bec22f2c126bd51023e141f1a113c8faf3d8 | 19a8a53c753ae0978fc092d9a2f6f560dc8644bf | refs/heads/master | 2022-07-07T08:36:06.902801 | 2020-11-08T21:59:00 | 2020-11-08T21:59:00 | 251,587,209 | 0 | 0 | null | 2022-06-22T01:36:43 | 2020-03-31T11:47:53 | Python | UTF-8 | Python | false | false | 183 | py | from .profile import Profile
from .profile_board import ProfileBoard
from .route import Route
from .user import User
__all__ = [
Profile,
ProfileBoard,
Route,
User
]
| [
"[email protected]"
] | |
061b46322d284653c94c803921d86a35f31c4c3a | 8c067089ac94844919c4dc37681c898c0f93819e | /jenkins-master/jobs/scripts/workspace/config.py | c477a45df06ebcddcd194c10096182da65606db8 | [] | no_license | Ramireddyashok/mozmill-ci | 9ac1a5762fa8c14c4802447a9d5878422d2e164a | 0b8c6417e596235cca403ca80947fc328bd2fe8b | refs/heads/master | 2021-04-30T01:27:42.502771 | 2017-06-14T18:24:50 | 2017-06-14T18:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
here = os.path.dirname(os.path.abspath(__file__))
config = {
'test_types': {
'functional': {
'harness_config': os.path.join('firefox_ui_tests', 'qa_jenkins.py'),
'harness_script': os.path.join('firefox_ui_tests', 'functional.py'),
'treeherder': {
'group_name': 'Firefox UI Functional Tests',
'group_symbol': 'Fxfn',
'job_name': 'Firefox UI Functional Tests ({locale})',
'job_symbol': '{locale}',
'tier': 3,
'artifacts': {
'log_info.log': os.path.join(here, 'build', 'upload', 'logs', 'log_info.log'),
'report.html': os.path.join(here, 'build', 'upload', 'reports', 'report.html'),
},
'log_reference': 'log_info.log',
},
},
'update': {
'harness_config': os.path.join('firefox_ui_tests', 'qa_jenkins.py'),
'harness_script': os.path.join('firefox_ui_tests', 'update.py'),
'treeherder': {
'group_name': 'Firefox UI Update Tests - {update_channel}',
'group_symbol': 'Fxup-{update_channel}',
'job_name': 'Firefox UI Update Tests - {update_channel} {locale}-{update_number}',
'job_symbol': '{locale}-{update_number}',
'tier': 3,
'artifacts': {
'log_info.log': os.path.join(here, 'build', 'upload', 'logs', 'log_info.log'),
'report.html': os.path.join(here, 'build', 'upload', 'reports', 'report.html'),
# TODO: Bug 1210753: Move generation of log as option to mozharness
'http.log': os.path.join(here, 'build', 'http.log'),
},
'log_reference': 'log_info.log',
},
},
},
}
| [
"[email protected]"
] | |
b969aff50964ebae5ecd9541c8ed4af2b0ec93fa | 4d99350a527a88110b7bdc7d6766fc32cf66f211 | /OpenGLCffi/GLX/EXT/NV/copy_image.py | 1981ca497e85e50301da73e66cd5b08f9e4f85dd | [
"MIT"
] | permissive | cydenix/OpenGLCffi | e790ef67c2f6c9877badd5c38b7d58961c8739cd | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | refs/heads/master | 2021-01-11T07:31:10.591188 | 2017-04-17T11:04:55 | 2017-04-17T11:04:55 | 80,312,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | from OpenGLCffi.GLX import params
@params(api='glx', prms=['dpy', 'srcCtx', 'srcName', 'srcTarget', 'srcLevel', 'srcX', 'srcY', 'srcZ', 'dstCtx', 'dstName', 'dstTarget', 'dstLevel', 'dstX', 'dstY', 'dstZ', 'width', 'height', 'depth'])
def glXCopyImageSubDataNV(dpy, srcCtx, srcName, srcTarget, srcLevel, srcX, srcY, srcZ, dstCtx, dstName, dstTarget, dstLevel, dstX, dstY, dstZ, width, height, depth):
pass
| [
"[email protected]"
] | |
f3d1fe716956a41dcaccd88cddd806332ba54e33 | 1b5c3039c05427ad5e731a18e06e0e0accb5ce98 | /scripts/creatematches.py | 2c4bb9fd8b280c5439cdaa0f3eddc508cad483bc | [] | no_license | matthew-brett/beatbased | 1df43cb7f16b4d6cde18acecd7d2b7209887ed89 | f6c7c6bd0fb62efcb3397d512f70717b49f5cccd | refs/heads/master | 2021-01-23T21:42:29.063883 | 2014-05-30T19:05:06 | 2014-05-30T19:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | #!/bin/env python
'''Creates as many matches as possible for metric sequences, with 5-7 intervals'''
import beatsequence as BS
#First, create a list of all combinations of intervals, taking those which add up to 12
print "calculating possible combinations"
S=[]
for length in range(5,10):
L=[4 for n in range(length)]
for i in BS.valueperm(L):
#work out total, gp to next if not 12
total=0
for n in i:
total+=n
if total!=12:continue
i.sort()
if i not in S:
print "added",i
S.append(i)
#now run the match creator on S:
for i in S:
BS.getmatches(i,debug=True)
print i,"completed"
i=raw_input("Finished. Press enter to close")
| [
"[email protected]"
] | |
b2a97343f96ca9246962933acc173b23375b9a5c | 3474b315da3cc5cb3f7823f19a18b63a8da6a526 | /scratch/KRAMS/src/apps/scratch/rch/mlab/yarn_cs.py | 2d8a38ca1f2761ea2b42d42e5d831bb3cf157889 | [] | no_license | h4ck3rm1k3/scratch | 8df97462f696bc2be00f1e58232e1cd915f0fafd | 0a114a41b0d1e9b2d68dbe7af7cf34db11512539 | refs/heads/master | 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null | UTF-8 | Python | false | false | 3,840 | py | #-------------------------------------------------------------------------------
#
# Copyright (c) 2009, IMB, RWTH Aachen.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in simvisage/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.simvisage.com/licenses/BSD.txt
#
# Thanks for using Simvisage open source!
#
# Created on Jul 22, 2010 by: rch
from numpy import \
loadtxt, ones_like, vstack, c_, hstack, array, cumsum, \
zeros_like, zeros
import wxversion
wxversion.select( '2.8' )
from os.path import join
from promod.simdb import SimDB
simdb = SimDB()
data_dir = join( simdb.exdata_dir, 'trc', 'bond_structure' )
from enthought.tvtk.api import tvtk
from enthought.mayavi.scripts import mayavi2
from enthought.mayavi import mlab
n_slices = 15
start_slice = 4
slice_range = range( start_slice, start_slice + n_slices )
slice_distance = 500 # micrometers
def read_yarn_structure():
slice_point_list = []
slice_radius_list = []
slice_len_list = []
cut_off_start = zeros( ( n_slices, ), dtype = 'int' )
cut_off_start[ 1: ] += 0
for slice_idx, cut_off_idx in zip( slice_range, cut_off_start ):
data_file = join( data_dir, '1cOrientiertSchnitt%d.txt' % slice_idx )
print 'reading data_file'
points = loadtxt( data_file ,
skiprows = 1,
usecols = ( 1, 2, 3 ) )
y = points[ cut_off_idx:, 0]
z = points[ cut_off_idx:, 1]
x = ones_like( y ) * slice_idx * slice_distance
r = points[ cut_off_idx:, 2]
slice_point_list.append( c_[ x, y, z ] )
slice_radius_list.append( r )
slice_len_list.append( points.shape[0] )
lens_arr = array( slice_len_list )
print 'slice lens', lens_arr
offset_arr = cumsum( lens_arr )
slice_offset_arr = zeros_like( offset_arr )
slice_offset_arr[1:] = offset_arr[:-1]
print 'slice offsets', slice_offset_arr
data_file = join( data_dir, 'connectivity.txt' )
filam_connect_arr = loadtxt( data_file )
print filam_connect_arr.shape
print filam_connect_arr.shape
print slice_offset_arr.shape
fil_map = array( filam_connect_arr + slice_offset_arr, dtype = 'int' )
points = vstack( slice_point_list )
radius = hstack( slice_radius_list )
print points.shape
print max( fil_map.flatten() )
p = points[ fil_map.flatten() ]
r = radius[ fil_map.flatten() ]
mlab.plot3d( p[:, 0], p[:, 1], p[:, 2], r,
tube_radius = 20, colormap = 'Spectral' )
offset = array( [0, 3, 6] )
cells = array( [10, 4000, 20, 5005, 20, 4080, 4000, 20, 404 ] )
# line_type = tvtk.Line().cell_type # VTKLine == 10
# cell_types = array( [line_type] )
# # Create the array of cells unambiguously.
# cell_array = tvtk.CellArray()
# cell_array.set_cells( 3, cells )
# Now create the UG.
ug = tvtk.UnstructuredGrid( points = points )
# Now just set the cell types and reuse the ug locations and cells.
# ug.set_cells( cell_types, offset, cell_array )
ug.point_data.scalars = radius
ug.point_data.scalars.name = 'radius'
return ug
# Now view the data.
@mayavi2.standalone
def view( ug ):
from enthought.mayavi.sources.vtk_data_source import VTKDataSource
from enthought.mayavi.modules.outline import Outline
from enthought.mayavi.modules.surface import Surface
from enthought.mayavi.modules.vectors import Vectors
mayavi.new_scene()
src = VTKDataSource( data = ug )
mayavi.add_source( src )
s = Surface()
mayavi.add_module( s )
if __name__ == '__main__':
ug = read_yarn_structure()
mlab.show() # view( ug )
| [
"Axel@Axel-Pc"
] | Axel@Axel-Pc |
3b6a980ffb87af3580820c10aa1428a173c1618d | 9399d687b2e41245968ba0e9d413a6789d773b1d | /CI/erlang/erlang/libs/fake_ne/interface/FakeNeKeyword.py | d00bfa40b778bc57c8f8d8b44ee00d7e54648cad | [] | no_license | jiangliu888/DemoForSpeed | be41bdb85a1d1f5ca9350a3a1f681ced5ec9b929 | 11319bc19c074327d863ac2813a04cef3487f8d6 | refs/heads/main | 2023-08-23T14:16:21.686155 | 2021-10-17T12:01:34 | 2021-10-17T12:01:34 | 388,452,435 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,959 | py | import os
from client.device import DeviceClient
from erlang.libs.uranus.interface.EsInterface import EsInterface
from erlang.libs.uranus.interface.UranusInterface import UranusInterface
from erlang.libs.variables import MeasureResultVariables
class FakeNeKeyword(object):
DB_PORT = 3000
DB_REST_PORT = 3500
OFP_REST_PORT = 4000
fake_ne_list = {}
def __init__(self):
pass
@staticmethod
def get_fake_ne_measure_tunnels(neid):
return DeviceClient.get_device_config(int(neid), "TUNNEL")
@staticmethod
def get_fake_ne_measure_tasks(neid):
return DeviceClient.get_device_config(int(neid), "MEASURE")
@staticmethod
def get_fake_ne_measure_task_with_address(ne_id, local_ip, remote_ip):
tasks = FakeNeKeyword.get_fake_ne_measure_tasks(ne_id)
print tasks
return filter(lambda x: (x["remote-ipv4-address"] == remote_ip) and (x["local-ipv4-address"] == local_ip), tasks)
@staticmethod
def get_fake_ne_tunnels_with_dstNeId(local_NeId, dstNeId):
s_id = int(local_NeId) >> 4
tunnels = FakeNeKeyword.get_fake_ne_measure_tunnels(s_id)
print tunnels
return filter(lambda x: x["dst"] == dstNeId, tunnels)
@staticmethod
def get_fake_ne_measure_tasks_with_dstNeId(local_NeId, dstNeId):
s_id = int(local_NeId) >> 4
tasks = FakeNeKeyword.get_fake_ne_measure_tasks(s_id)
print tasks
return filter(lambda x: x["dstNeId"] == dstNeId, tasks)
@staticmethod
def get_fake_ne_flows_id(ne_id):
res = DeviceClient.get_routes(int(ne_id))
return map(int, res) if res else []
@staticmethod
def change_ne_link_measure_result(ne_id, jitter, loss, delay=[0, 0, 0, 0], loss_target=[]):
cmd = "ps -ef |grep create_measure|grep {} |awk {}".format(ne_id, r"'{print $10}'")
r = os.popen(cmd)
info = r.read().split('\n')[0]
print 'info is {}'.format(info)
cmd = "ps -ef |grep create_measure|grep {} |awk {}|xargs sudo kill -9".format(ne_id, r"'{print $2}'")
ret = os.system(cmd)
print 'cmd is {} and ret is {}'.format(cmd, ret)
cmd = "sh -c 'python erlang/libs/fake_ne/create_measure_result.py {} {} {} {} {} {} >> logs/{}measure.log &'".format(info, int(ne_id), ' '.join(jitter), ' '.join(loss), ' '.join(delay), ' '.join(loss_target), int(ne_id))
print cmd
ret = os.system(cmd)
assert ret == 0
@staticmethod
def export_data_to_es(topo_name):
for es_data in MeasureResultVariables.topo(topo_name):
EsInterface.bulk_insert_12_measure_results(es_data['netLink'], es_data['ttl'], es_data['jitter'], es_data['loss'])
@staticmethod
def get_fake_ne_type(ne_id):
rec, ne_info = UranusInterface.get_netcfg_ne_config_with_id(ne_id)
ne_type = ne_info["type"]
return ne_type
| [
"[email protected]"
] | |
4d00ccd7e2aa83e59a80c5067dca230245fd07bc | 09f8a8bb1655cc76a29ac60896d1d42b0145f3c2 | /Utils.py | f617be1131d20d1c307cca7ba5b167e85ef6ea3f | [
"BSD-3-Clause"
] | permissive | FlatL1neAPT/PoshC2_Python | 4d1eb4d6a639395a32a2674ee49a17969a2b8a79 | 39f755f67bf4de15e93f56cd690e50924aa8bba0 | refs/heads/master | 2020-05-04T02:58:16.717780 | 2019-03-08T10:32:55 | 2019-03-08T10:32:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | import os, base64, string, random, re
validate_sleep_regex = re.compile("^[0-9]*[smh]$")
def gen_key():
key = os.urandom(256/8)
return base64.b64encode(key)
def formStrMacro(varstr, instr):
holder = []
str1 = ''
str2 = ''
str1 = varstr + ' = "' + instr[:54] + '"'
for i in xrange(54, len(instr), 48):
holder.append(varstr + ' = '+ varstr +' + "'+instr[i:i+48])
str2 = '"\r\n'.join(holder)
str2 = str2 + "\""
str1 = str1 + "\r\n"+str2
return str1
def formStr(varstr, instr):
holder = []
str1 = ''
str2 = ''
str1 = varstr + ' = "' + instr[:56] + '"'
for i in xrange(56, len(instr), 48):
holder.append('"'+instr[i:i+48])
str2 = '"\r\n'.join(holder)
str2 = str2 + "\""
str1 = str1 + "\r\n"+str2
return "%s;" % str1
def randomuri(size = 15, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def validate_sleep_time(sleeptime):
sleeptime = sleeptime.strip()
return validate_sleep_regex.match(sleeptime) | [
"email"
] | email |
fbc34bce75ef0bcc33b60c5c56c4ee439012a1ba | 7e470dd54740ca6331d1341328e344a713329a77 | /src/DQD_counting_statistics/zero_freq_statistics.py | a710fb1843cd96bbb95ceec6215f5f71d5d12580 | [] | no_license | rstones/DQD_counting_statistics | 127eb2ad83c5c69bdfb168975077f541c09d4bbc | 3eb5ad9876b59c43c35150238c3af3396b3ad100 | refs/heads/master | 2020-04-07T03:10:59.294391 | 2017-10-22T10:58:06 | 2017-10-22T10:58:06 | 53,421,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | '''
Created on 8 Mar 2016
@author: rstones
'''
import numpy as np
import quant_mech.utils as utils
from DQD_counting_statistics.DQD_model import DQDModel
import matplotlib.pyplot as plt
bias_values = np.array([0, 1.5, 3., 4.5, 6.])
Gamma_R_range = np.logspace(-4, 3, 1000)
model = DQDModel(remove_elements=True)
current = np.zeros((bias_values.size, Gamma_R_range.size))
F2 = np.zeros((bias_values.size, Gamma_R_range.size))
coherence = np.zeros((bias_values.size, Gamma_R_range.size), dtype='complex')
for i,v in enumerate(bias_values):
model.bias = v
for j,Gamma_R in enumerate(Gamma_R_range):
model.Gamma_R = Gamma_R
ss = utils.stationary_state_svd(model.liouvillian(), model.density_vector_populations())
current[i,j] = model.mean(ss)
F2[i,j] = model.second_order_fano_factor(ss)
coherence[i,j] = ss[2]
np.savez('../../data/DQD_zero_freq_counting_statistics_data.npz', Gamma_R_range=Gamma_R_range, bias_values=bias_values, current=current, F2=F2, coherence=coherence)
fig,(ax1,ax2,ax3) = plt.subplots(1,3)
for i,v in enumerate(bias_values):
ax1.semilogx(Gamma_R_range, current[i], label=v)
ax2.semilogx(Gamma_R_range, F2[i], label=v)
ax3.semilogx(Gamma_R_range, np.real(coherence[i]), label=v)
ax1.legend().draggable()
ax2.legend().draggable()
ax3.legend().draggable()
plt.show()
| [
"[email protected]"
] | |
28459452020b3f9d921767c1fd75d3f868741f99 | 26f23588e80acc2b28d4cc70a8fbcf78c5b33a20 | /PythonModels/learnBasic/file_options.py | 4173a88638e76c5058927e4ba42da592ecbd3ca6 | [] | no_license | Timehsw/PythonCouldbeEverything | aa31b3e32bf68b49fe8e96b971637353a8ef644f | 85d4f1a2c93c7b1edc34ceb9e8bb3c8d7beb30e9 | refs/heads/master | 2021-01-01T15:38:25.253094 | 2018-01-22T06:49:05 | 2018-01-22T06:49:05 | 97,661,530 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # coding=utf8
__author__ = 'zenith'
#读文件
f=open("D:\data.txt","r")
#print(f.read())
#print(f.readline().strip())
#print(f.readline().strip())
for line in f.readlines():
print(line.strip())
f.close()
#文件追加内容
f=open("D:\data.txt","a")
f.write("\n超人学院")
f.close()
#文件覆盖内容
f=open("D:\data.txt","w")
f.write("\n超人学院")
f.close()
| [
"[email protected]"
] | |
4ec82c4d69562c103864beb83bc5eac587470077 | 1af49694004c6fbc31deada5618dae37255ce978 | /third_party/blink/renderer/bindings/scripts/bind_gen/__init__.py | 44c068af8ca05cd83d23acbbb3e0bc2dfd11be14 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause"
] | permissive | sadrulhc/chromium | 59682b173a00269ed036eee5ebfa317ba3a770cc | a4b950c23db47a0fdd63549cccf9ac8acd8e2c41 | refs/heads/master | 2023-02-02T07:59:20.295144 | 2020-12-01T21:32:32 | 2020-12-01T21:32:32 | 317,678,056 | 3 | 0 | BSD-3-Clause | 2020-12-01T21:56:26 | 2020-12-01T21:56:25 | null | UTF-8 | Python | false | false | 2,353 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import sys
# Set up |sys.path| so that this module works without user-side setup of
# PYTHONPATH assuming Chromium's directory tree structure.
def _setup_sys_path():
expected_path = 'third_party/blink/renderer/bindings/scripts/bind_gen/'
this_dir = os.path.dirname(__file__)
root_dir = os.path.abspath(
os.path.join(this_dir, *(['..'] * expected_path.count('/'))))
module_dirs = (
# //third_party/blink/renderer/bindings/scripts/web_idl
os.path.join(root_dir, 'third_party', 'blink', 'renderer', 'bindings',
'scripts'),
# //third_party/blink/renderer/build/scripts/blinkbuild
os.path.join(root_dir, 'third_party', 'blink', 'renderer', 'build',
'scripts'),
# //third_party/mako/mako
os.path.join(root_dir, 'third_party', 'mako'),
)
for module_dir in reversed(module_dirs):
# Preserve sys.path[0] as is.
# https://docs.python.org/3/library/sys.html?highlight=path[0]#sys.path
sys.path.insert(1, module_dir)
_setup_sys_path()
from .callback_function import generate_callback_functions
from .callback_interface import generate_callback_interfaces
from .dictionary import generate_dictionaries
from .enumeration import generate_enumerations
from .interface import generate_interfaces
from .namespace import generate_namespaces
from .task_queue import TaskQueue
from .union import generate_unions
def init(web_idl_database_path, root_src_dir, root_gen_dir, component_reldirs):
"""
Args:
web_idl_database_path: File path to the web_idl.Database.
root_src_dir: Project's root directory, which corresponds to "//" in GN.
root_gen_dir: Root directory of generated files, which corresponds to
"//out/Default/gen" in GN.
component_reldirs: Pairs of component and output directory.
"""
from . import package_initializer
package_initializer.init(web_idl_database_path=web_idl_database_path,
root_src_dir=root_src_dir,
root_gen_dir=root_gen_dir,
component_reldirs=component_reldirs)
| [
"[email protected]"
] | |
50d41bc04b35250d86a4adb67e67092dd7f34b51 | 34339da2c834d79c9d3142afb8c498c62fb8917d | /thenewboston_node/blockchain/tasks/debug_task.py | 5af50cb9f0a73b2cb20d0323ab22fd1023029219 | [
"MIT"
] | permissive | olegtropinin/thenewboston-node | 5abfcbe02404f7c5347af724fb06c7f6420226ba | 2de4e14ef6855646121840224a82fcfc505b213c | refs/heads/master | 2023-08-23T09:33:25.286098 | 2021-10-14T22:53:15 | 2021-10-14T22:53:15 | 417,582,617 | 0 | 0 | MIT | 2021-10-15T17:27:52 | 2021-10-15T17:27:51 | null | UTF-8 | Python | false | false | 190 | py | # TODO(dmu) HIGH: Remove this example task once real tasks are created
from celery import shared_task
@shared_task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}')
| [
"[email protected]"
] | |
088a093e36d31ff4a4fc4890cd0ea0a3f98a32e7 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_BoxCox/trend_MovingMedian/cycle_30/ar_/test_artificial_32_BoxCox_MovingMedian_30__100.py | eaa3d461b45d44c18f2e1bbaffa799b7393f51fd | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 265 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "BoxCox", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"[email protected]"
] | |
f9a3518f256b925c3a31d214b721e8d53706123e | f0b741f24ccf8bfe9bd1950425d83b6291d21b10 | /backend/api/v2beta1/python_http_client/test/test_v2beta1_runtime_config.py | ea104015c08bff480428f747f9b1fe16d1dd0715 | [
"Apache-2.0"
] | permissive | kubeflow/pipelines | e678342b8a325559dec0a6e1e484c525fdcc8ce8 | 3fb199658f68e7debf4906d9ce32a9a307e39243 | refs/heads/master | 2023-09-04T11:54:56.449867 | 2023-09-01T19:07:33 | 2023-09-01T19:12:27 | 133,100,880 | 3,434 | 1,675 | Apache-2.0 | 2023-09-14T20:19:06 | 2018-05-12T00:31:47 | Python | UTF-8 | Python | false | false | 1,580 | py | # coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfp_server_api
from kfp_server_api.models.v2beta1_runtime_config import V2beta1RuntimeConfig # noqa: E501
from kfp_server_api.rest import ApiException
class TestV2beta1RuntimeConfig(unittest.TestCase):
"""V2beta1RuntimeConfig unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V2beta1RuntimeConfig
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kfp_server_api.models.v2beta1_runtime_config.V2beta1RuntimeConfig() # noqa: E501
if include_optional :
return V2beta1RuntimeConfig(
parameters = {
'key' : None
},
pipeline_root = '0'
)
else :
return V2beta1RuntimeConfig(
)
def testV2beta1RuntimeConfig(self):
"""Test V2beta1RuntimeConfig"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
355ea3438068cb566e9bb686ad11c0e9bbcbe658 | 20f86ba7febb3233516f0e2161bc98604c302cc4 | /tests/acceptance/page_model/base_page.py | 165ad2e4da698093c6351465a36d33bb5fb659c4 | [] | no_license | LukaszMalucha/Machine-Learning-Dashboard | 5369270ff39710f2c6545ea0828f01265d7e797f | 3ee29498d7d74365a2cd33547795ddfe9573dac4 | refs/heads/master | 2022-12-10T05:08:52.693425 | 2019-03-14T11:15:54 | 2019-03-14T11:15:54 | 126,514,014 | 8 | 3 | null | 2022-12-08T01:33:30 | 2018-03-23T16:52:05 | Python | UTF-8 | Python | false | false | 1,023 | py | from tests.acceptance.locators.base_page import BasePageLocators
class BasePage:
def __init__(self, driver):
self.driver = driver
@property
def url(self):
return 'http://127.0.0.1:5000'
@property
def title(self):
return self.driver.find_element(*BasePageLocators.TITLE)
@property
def navigation(self):
return self.driver.find_elements(*BasePageLocators.NAV_LINKS)
@property
def dropdown(self):
return self.driver.find_element(*BasePageLocators.DROPDOWN)
@property
def dropdown_links(self):
return self.driver.find_elements(*BasePageLocators.DROPDOWN_LINKS)
@property
def table(self):
return self.driver.find_element(*BasePageLocators.TABLE)
@property
def github_user(self):
return self.driver.find_element(*BasePageLocators.GITHUB_USER)
@property
def github_repos(self):
return self.driver.find_element(*BasePageLocators.GITHUB_REPOS)
| [
"[email protected]"
] | |
fc4dd2aeebba0b006b2c867b0c71b235f777c216 | 4737df4162bee6abc7b78d1e8b4930d2cb542d6b | /graphgallery/nn/layers/pytorch/conv/dagnn.py | 24f060cea3fb5cf39695a42498b8ea286a211594 | [
"MIT"
] | permissive | freebird3366/GraphGallery | d1aa4ff291753ccf0ac4a8e024d18c59d2db8aa8 | f3294dad35ca0e14a525ed48f18feae2e9af661f | refs/heads/master | 2023-02-23T20:04:30.316450 | 2021-02-01T16:06:03 | 2021-02-01T16:06:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | import torch
import torch.nn as nn
from graphgallery.nn.init.pytorch import uniform, zeros
from ..get_activation import get_activation
class PropConvolution(nn.Module):
    """DAGNN propagation layer: score-weighted mix of the 0..K hop features.

    Stacks the input together with K successive propagations ``adj @ x`` and
    combines them using per-hop scalar scores produced by a shared linear gate.
    """

    def __init__(self,
                 in_channels,
                 out_channels=1,
                 K=10,
                 use_bias=False,
                 activation=None):
        super().__init__()
        assert out_channels == 1, "'out_channels' must be 1"
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation = get_activation(activation)
        # Maps each hop representation to a single retention score.
        self.w = nn.Linear(in_channels, out_channels, bias=use_bias)
        self.K = K

    def reset_parameters(self):
        self.w.reset_parameters()

    def forward(self, x, adj):
        """Return the score-weighted combination of the 0..K hop features."""
        hops = [x]
        h = x
        for _ in range(self.K):
            h = torch.spmm(adj, h)  # one more propagation step
            hops.append(h)
        stacked = torch.stack(hops, dim=1)             # (N, K + 1, C)
        scores = self.activation(self.w(stacked))      # (N, K + 1, 1)
        scores = scores.permute(0, 2, 1).contiguous()  # (N, 1, K + 1)
        return (scores @ stacked).squeeze(1)           # (N, C)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.in_channels} -> {self.out_channels})"
| [
"[email protected]"
] | |
0180fb50fcc9a71e70b3ccce51b1092d8db51019 | 09ecd5f17ff36896c141db58563de3887d3f627d | /src/accounts/forms.py | ce6f9e63345af563ce7d020d907191aa2146429a | [] | no_license | samirthapa20/tweetme | df9b43bc8be4975343a54cceebba0f259ab6a6dd | 23d77575b85f8f6ff5d8993d3bbbf3898c1e6671 | refs/heads/master | 2021-05-23T15:32:18.136662 | 2020-09-09T14:18:37 | 2020-09-09T14:18:37 | 253,362,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class UserRegisterForm(forms.Form):
    """Registration form enforcing unique username/e-mail and matching passwords."""

    username = forms.CharField()
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput)
    password2 = forms.CharField(widget=forms.PasswordInput)

    def clean_password2(self):
        """Ensure both password entries match; returns the confirmation value."""
        password = self.cleaned_data.get('password')
        password2 = self.cleaned_data.get('password2')
        if password != password2:
            raise forms.ValidationError('Password must match')
        return password2

    def clean_username(self):
        """Reject usernames already in use (exact, case-insensitive match)."""
        username = self.cleaned_data.get('username')
        # Bug fix: `username__icontains` matched substrings, so registering
        # "bob" was refused whenever any existing name merely contained "bob".
        if User.objects.filter(username__iexact=username).exists():
            raise forms.ValidationError("This username is taken")
        return username

    def clean_email(self):
        """Reject e-mail addresses already registered (exact, case-insensitive)."""
        email = self.cleaned_data.get('email')
        # Same substring-match bug as clean_username; use an exact lookup.
        if User.objects.filter(email__iexact=email).exists():
            raise forms.ValidationError("This email is already taken.")
        return email
| [
"[email protected]"
] | |
7fb4f71a9ccc64dc5c65d6bf095c6e49af56ef7a | 6820e74ec72ed67f6b84a071cef9cfbc9830ad74 | /plans/migrations/0008_auto_20150401_2155.py | dbca0bd39f59f421e402b58652c15b1cbd599a57 | [
"MIT"
] | permissive | AppforallHQ/f5 | 96c15eaac3d7acc64e48d6741f26d78c9ef0d8cd | 0a85a5516e15d278ce30d1f7f339398831974154 | refs/heads/master | 2020-06-30T17:00:46.646867 | 2016-11-21T11:41:59 | 2016-11-21T11:41:59 | 74,357,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import plans.models
class Migration(migrations.Migration):
    """Auto-generated: replaces the GiftInvoice model with ItemInvoice
    (same field layout, new name)."""

    dependencies = [
        ('plans', '0007_auto_20150330_0046'),
    ]

    operations = [
        # Create the replacement model first ...
        migrations.CreateModel(
            name='ItemInvoice',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('amount', models.IntegerField(validators=[plans.models.neg_validator])),
                ('plan_amount', models.IntegerField(validators=[plans.models.neg_validator])),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('paid', models.BooleanField(default=False)),
                ('pay_time', models.DateTimeField(null=True, blank=True)),
                ('invalid', models.BooleanField(default=False)),
                ('metadata', jsonfield.fields.JSONField()),
                ('generated_promo_code', models.ForeignKey(related_name='+', to='plans.PromoCode')),
                ('plan', models.ForeignKey(to='plans.Plan', null=True)),
                ('promo_code', models.ForeignKey(default=None, blank=True, to='plans.PromoCode', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # ... then drop GiftInvoice's relations before deleting the model itself.
        migrations.RemoveField(
            model_name='giftinvoice',
            name='generated_promo_code',
        ),
        migrations.RemoveField(
            model_name='giftinvoice',
            name='plan',
        ),
        migrations.RemoveField(
            model_name='giftinvoice',
            name='promo_code',
        ),
        migrations.DeleteModel(
            name='GiftInvoice',
        ),
    ]
| [
"[email protected]"
] | |
16093e48e8ea6d2c734900b39e59e698fffa2edc | 29bec83fc600720533ad2bcf17fc90cd9ca385b7 | /0x06-python-classes/prueba_OOP.py | 19e79b242fb1d7fd13ef39f02007e7cc9e743a28 | [] | no_license | VictorZ94/holbertonschool-higher_level_programming | 73a7f504cde583f43f641e18e692e062610870a4 | ad512a1c76dc9b4c999a0ba2922c79f56206dd98 | refs/heads/master | 2023-03-25T04:38:12.708766 | 2021-03-24T01:08:47 | 2021-03-24T01:08:47 | 291,826,914 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!/usr/bin/python3
class coche():
    """Toy car class used to practise basic OOP (class attributes + methods)."""

    # Class-level defaults shared by every instance.
    largochasis = 250
    anchochasis = 120
    ruedas = 4
    enmarcha = False

    def arrancar(self):
        """Start the car."""
        self.enmarcha = True

    def estado(self):
        """Return the car's running state (Spanish, exactly as printed by the demo)."""
        if self.enmarcha:
            return "El coche está en marcha"
        return "El coche está parado"
# Quick demo of the class above.
micoche = coche()
print(micoche.largochasis)
print(micoche.ruedas)
# micoche.arrancar()  # left commented out: the demo shows the stopped state
print(micoche.estado())
"[email protected]"
] | |
bb5630d590dfe6c1e987d8698e11bff0633d156d | 7a803cd0c16ff676e3d7ecc33ec5e7af2c42d026 | /hello.py | f3123c66e088b6ec25c1b96a658855cae387ee88 | [] | no_license | emetowinner/VGG-Internship-Assignments- | ddc798da4c91572455d4f69b0a0524def13be268 | 67fa5b345b0981dd43694b72d5fc61f45d431c19 | refs/heads/master | 2020-12-15T03:40:37.321894 | 2020-03-05T00:46:39 | 2020-03-05T00:46:39 | 234,981,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #Learning Integer Literals
# Read the birth month as text; input() always returns a string.
birth_month = input('Enter birth month: ')

# isinstance() is the idiomatic type check (not `type(x) == str`).
if isinstance(birth_month, str):
    print('Not an integer')

if not isinstance(birth_month, int):
    print('Converting to int now.........')
    # Bug fix: bind the conversion result; the original bare `int(birth_month)`
    # call discarded it, so the variable stayed a string.
    birth_month = int(birth_month)
    print('....................')
    print('Now is of int type')
"[email protected]"
] | |
def solve():
    """Read one line from stdin and print it with every letter's case swapped."""
    line = input()
    # str.swapcase() is the stdlib idiom for the original per-character
    # upper()/lower() loop (identical for the ASCII input this task uses).
    print(line.swapcase())
if __name__ == '__main__':
    # Entry point: run only when executed as a script, not on import.
    solve()
"[email protected]"
] | |
755c8410856fd9a634ed73e87e50ec135313c22b | 1f3bed0bb480a7d163dab73f1d315741ecbc1072 | /vtkplotter_examples/pyplot/plot7_stream.py | 04cb003af2b8799fd539ccebed6d1317312814c5 | [
"MIT"
] | permissive | ismarou/vtkplotter-examples | 1ce78197182da7496b016b27f1d5eb524c49cac6 | 1eefcc026be169ab7a77a5bce6dec8044c33b554 | refs/heads/master | 2021-03-11T18:43:22.313457 | 2020-03-03T22:11:25 | 2020-03-03T22:11:25 | 246,551,341 | 4 | 0 | null | 2020-03-11T11:18:48 | 2020-03-11T11:18:47 | null | UTF-8 | Python | false | false | 672 | py | """Plot streamlines of the 2D field:
u(x,y) = -1 - x^2 + y
v(x,y) = 1 + x - y^2
"""
from vtkplotter import *
import numpy as np
# Build the sampling grid and evaluate the vector field (U, V) on it.
X, Y = np.mgrid[-5:5 :15j, -4:4 :15j]
U = -1 - X**2 + Y
V = 1 + X - Y**2

# Random seed points inside the [-4, 4] x [-4, 4] square.
prob_pts = np.random.rand(200, 2)*8 - [4,4]

stream = streamplot(X, Y, U, V,
                    lw=0.001,             # line width in abs. units
                    direction='forward',  # 'both' or 'backward'
                    probes=prob_pts,      # try commenting this out
                    )
seed_markers = Points(prob_pts, r=5, c='white')

show(stream, seed_markers,
     Text2D(__doc__, c='w'),
     axes=1, bg='bb')
| [
"[email protected]"
] | |
7c945592d39eb2f6680b846f93d8f8921188613c | 0ed9a8eef1d12587d596ec53842540063b58a7ec | /cloudrail/knowledge/rules/rules_loader.py | 443b78c547b07c45631f401fffc28e2ebc664574 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | cbc506/cloudrail-knowledge | 8611faa10a3bf195f277b81622e2590dbcc60da4 | 7b5c9030575f512b9c230eed1a93f568d8663708 | refs/heads/main | 2023-08-02T08:36:22.051695 | 2021-09-13T15:23:33 | 2021-09-13T15:24:26 | 390,127,361 | 0 | 0 | MIT | 2021-07-27T21:08:06 | 2021-07-27T21:08:06 | null | UTF-8 | Python | false | false | 1,710 | py | import functools
from typing import Dict, Optional
from cloudrail.knowledge.context.cloud_provider import CloudProvider
from cloudrail.knowledge.exceptions import UnsupportedCloudProviderException
from cloudrail.knowledge.rules.base_rule import BaseRule
from cloudrail.knowledge.rules.aws_rules_loader import AwsRulesLoader
from cloudrail.knowledge.rules.azure_rules_loader import AzureRulesLoader
from cloudrail.knowledge.rules.gcp_rules_loader import GcpRulesLoader
class RulesLoader:
    """Aggregates the per-cloud rule loaders behind a single entry point."""

    @classmethod
    def load(cls, cloud_provider: Optional[CloudProvider] = None) -> Dict[str, BaseRule]:
        """Load rules for one provider, or merge all providers when none is given."""
        if not cloud_provider:
            return {**AwsRulesLoader().load(), **AzureRulesLoader().load(), **GcpRulesLoader().load()}
        loaders = {
            CloudProvider.AMAZON_WEB_SERVICES: AwsRulesLoader,
            CloudProvider.AZURE: AzureRulesLoader,
            CloudProvider.GCP: GcpRulesLoader,
        }
        loader_cls = loaders.get(cloud_provider)
        if loader_cls is None:
            raise UnsupportedCloudProviderException(cloud_provider)
        return loader_cls().load()

    @classmethod
    @functools.lru_cache(maxsize=None)
    def get_rules_source_control_links(cls) -> Dict[str, str]:
        """Map rule id -> GitHub source URL for rules defined in this package."""
        links = {}
        for rule_id, rule in cls.load().items():
            module = rule.__module__
            if not module.startswith('cloudrail.knowledge'):
                # Skip rules contributed from outside this repository.
                continue
            path = module.replace('.', '/')
            links[rule_id] = f'https://github.com/indeni/cloudrail-knowledge/blob/main/{path}.py'
        return links
| [
"[email protected]"
] | |
f6b845b799f3e15e52f10efd5b2ba60a4d5e1fb8 | da687718aa8ce62974090af63d25e057262e9dfe | /cap12-dicionarios/10_fromkeys_method.py | 59b1594ed08737b3f91bb025905c1d9639f0eab5 | [] | no_license | frclasso/revisao_Python_modulo1 | 77928fa4409c97d49cc7deccdf291f44c337d290 | 1e83d0ef9657440db46a8e84b136ac5f9a7c556e | refs/heads/master | 2020-06-25T05:37:28.768343 | 2019-07-27T22:23:58 | 2019-07-27T22:23:58 | 199,217,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | #!/usr/bin/env python3
"""retorna um novo dicionário cujas chaves são os elementos de uma sequencia e cujos
valores são todos iguais ao argumento valor.
Sintaxe: dict.fromkeys(seq[, value])
"""
seq = ['name', 'age', 'sex']
dict = dict.fromkeys(seq)
print('Novo dicionario: {}'.format(str(dict))) # nenhum valor foi definido para 'value'
# definido o valor 10 para o argumento value
# dict = dict.fromkeys(seq, 10)
# print('Novo dicionario: {}'.format(str(dict)))
| [
"[email protected]"
] | |
b2f6ba810f56fe21e915805b75b08d7c0443d9fc | 8fb7a7b4fb09ce457ad413d19191235cf4805851 | /notes code/detection of fail/object_only/scan_mark1/find_thing_on_print_bed.py | 1a6610a59ee5df617f1a2396d94b2e6a3a5120ce | [] | no_license | clambering-goat/honner-progect | df8ab2e22c223cf0f8cb59b93b132eea3d9030f2 | ea996ea34ac13867dea6d4935f9760c6915b206f | refs/heads/master | 2020-04-15T19:32:57.303438 | 2019-05-13T17:51:56 | 2019-05-13T17:51:56 | 164,954,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py |
import numpy as np
import cv2
# Load the stored scan and clamp it into 8-bit range for display.
data = np.load("data.npy")
image = data.astype(np.uint8)

# Zero out fully-saturated pixels in a single vectorized step; the original
# per-pixel double Python loop did the same work in O(rows*cols) iterations.
image[image == 255] = 0

cv2.imshow("frame", image)
cv2.waitKey(20000)
| [
"[email protected]"
] | |
9179210109a8fa035ce59bb29a6582ddd74d25fd | d9d9a203a27bd28fe9afc72ecc613b186b33d673 | /06_MultipleForm/mainform.py | fe80a611cdf948f4f13e439c5959ffe08d143681 | [] | no_license | wildenali/Belajar-GUI-dengan-pyQT | 378951fcf0e172f48bf71ec46d887599cf5e09ed | 06ebbcbf57bec8a6a63fbb6d5397a7e2ab7c9ef9 | refs/heads/master | 2020-04-06T10:51:58.582049 | 2018-12-31T10:37:56 | 2018-12-31T10:37:56 | 157,395,034 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from PyQt5.QtWidgets import QWidget, QPushButton
from otherform import *
class MainForm(QWidget):
    """Main window containing one button that opens an OtherForm window."""

    def __init__(self):
        super(MainForm, self).__init__()
        self.setupUI()

    def setupUI(self):
        """Size/position the window and wire the button to its slot."""
        self.resize(300, 500)
        self.move(400, 200)
        self.setWindowTitle('Form nya ada DUA')
        self.button = QPushButton('Muncuuuul kan')
        self.button.move(50, 50)
        self.button.setParent(self)
        self.button.clicked.connect(self.buttonClick)

    def buttonClick(self):
        """Slot: create the secondary form and show it."""
        self.form = OtherForm()
        self.form.show()
| [
"[email protected]"
] | |
28598f36e66c74da10b429d228ad8e96cb136f00 | aaf9df2f15ec9bbfb7d98c2239db940117bc6762 | /Algorithmic-Toolbox/covering_segments/covering_segments.py | e33a1707cc450ddd489122ddb82f546ec7713987 | [
"MIT"
] | permissive | ugaliguy/Data-Structures-and-Algorithms | db50a0f4b39908d17fa125ca70c0616f52d895d2 | 4bcbd1b0cff66f442a03d06393f654f8e3a61ded | refs/heads/master | 2021-01-21T14:08:42.127708 | 2016-07-04T00:43:38 | 2016-07-04T00:43:38 | 56,821,728 | 0 | 1 | null | 2016-07-04T00:43:39 | 2016-04-22T02:54:23 | Python | UTF-8 | Python | false | false | 854 | py | # Uses python3
import sys
from collections import namedtuple
from operator import attrgetter
Segment = namedtuple('Segment', 'start end')


def optimal_points(segments):
    """Return a minimum set of points such that every segment contains one.

    Greedy: scan segments ordered by right endpoint; whenever the current
    segment is not already covered by the last chosen point, take its right
    endpoint. Returns [] for empty input (the original crashed on it).
    """
    points = []
    last_point = None
    for segment in sorted(segments, key=attrgetter('end')):
        # Sorting by `end` guarantees last_point <= segment.end, so the
        # segment is covered iff segment.start <= last_point.
        if last_point is None or segment.start > last_point:
            last_point = segment.end
            points.append(last_point)
    return points
if __name__ == '__main__':
    # Read "n" followed by n (start, end) pairs from stdin.
    raw = sys.stdin.read()  # renamed: the original shadowed the builtin input()
    n, *data = map(int, raw.split())
    segments = [Segment(start, end) for start, end in zip(data[::2], data[1::2])]
    points = optimal_points(segments)
    print(len(points))
    for p in points:
        print(p, end=' ')
| [
"[email protected]"
] | |
4370545f8a75330aec51c5b699aada3f8df69d5c | 4e4c22dfabb1a0fa89f0f51f58737273412a30e0 | /fort_machine/wsgi.py | 640e612427bbf2c0356ea849505b08617eed3925 | [] | no_license | shaoqianliang/fort_machine | 4cb271d5ef29c924c09172ff397e2af8562ee4ba | cf7e3d4c6682831ce04bcde478930ab7e85abb01 | refs/heads/master | 2020-04-28T15:24:02.056674 | 2019-04-12T23:50:35 | 2019-04-12T23:50:35 | 175,372,042 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for fort_machine project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application

# Point Django at the project settings unless the environment already does.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fort_machine.settings")

# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| [
"[email protected]"
] | |
fe7d0f99e3ae6e1f339a1cd8e4642a724e9016f7 | 1b1e8e73649ad1eed89556a5d479b0a549354fd5 | /opennem/db/migrations/versions/4bf86ff5c8ff_update_indicies_that_aren_t_used.py | a7a46b84637e88e727f1fe594938c21feb0ebb3f | [
"MIT"
] | permissive | zalihat/opennem | 3ea8db7246f350fb0eacf8c6078dbffa4fe9aea2 | 0f82e4fc3fd2bcfbf56a2741d89e4228d017dcf3 | refs/heads/master | 2023-02-27T15:37:47.206336 | 2021-02-08T07:28:57 | 2021-02-08T07:28:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | # pylint: disable=no-member
"""
update indicies that aren't used
Revision ID: 4bf86ff5c8ff
Revises: 64987ea01b57
Create Date: 2020-11-23 02:54:29.564574
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "4bf86ff5c8ff"        # this migration's identifier
down_revision = "64987ea01b57"   # immediate parent revision
branch_labels = None             # no named branch
depends_on = None                # no cross-branch dependency
def upgrade():
    """Drop the per-timezone, per-period trading_interval indexes.

    Replaces 16 copy-pasted drop_index calls with a data-driven loop over
    the exact same index names (timezone x table x period).
    """
    for tz in ("perth", "sydney"):
        for table in ("facility_scada", "balancing_summary"):
            for period in ("year", "month", "day", "hour"):
                op.drop_index(f"idx_{table}_trading_interval_{tz}_{period}")
def downgrade():
    # Intentionally irreversible: the dropped indexes are not recreated.
    pass
| [
"[email protected]"
] | |
a99c4d3cb68c551d8ecf9d307608d40a13d95cd8 | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /0701-0800/0716-Max Stack/0716-Max Stack.py | 698d5125b3a7f7ace31abc4055e1c827e468fd5e | [
"MIT"
class MaxStack:
    """Stack with O(1) push/pop/top/peekMax and O(n) popMax.

    `self.maxSt[i]` always holds the maximum of `self.St[:i + 1]`.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.St = []
        self.maxSt = []

    def push(self, x: int) -> None:
        """Push x and record the running maximum alongside it."""
        self.St.append(x)
        if self.maxSt and self.maxSt[-1] > x:
            self.maxSt.append(self.maxSt[-1])
        else:
            self.maxSt.append(x)

    def pop(self) -> int:
        """Remove and return the top element."""
        self.maxSt.pop()
        return self.St.pop()

    def top(self) -> int:
        """Return the top element without removing it."""
        return self.St[-1]

    def peekMax(self) -> int:
        """Return the current maximum without removing it."""
        return self.maxSt[-1]

    def popMax(self) -> int:
        """Remove and return the topmost occurrence of the maximum."""
        target = self.maxSt[-1]
        moved = []
        # Peel elements off until the maximum surfaces, then drop it and
        # push the peeled elements back in their original order.
        while self.St[-1] != target:
            moved.append(self.pop())
        self.pop()
        while moved:
            self.push(moved.pop())
        return target
# Your MaxStack object will be instantiated and called as such:
# obj = MaxStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.peekMax()
# param_5 = obj.popMax()
| [
"[email protected]"
] | |
ffaa35bbff6e5594111a59aeed63bc26897a2692 | 0b12e31cafa598c163d2cc53706df193a73e31e3 | /people/models.py | 65c21cc6090d290ebf5ac91ed163dedd5de88207 | [] | no_license | getopen/pro | 6a4dba774558e1de0419a4c6daf030ee360d68fd | 97e939d26d9fdaf54f05f3cd4a9b32a6722d0ac3 | refs/heads/master | 2021-07-06T09:35:18.077577 | 2017-09-30T16:07:06 | 2017-09-30T16:07:06 | 100,471,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,517 | py | from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.utils import timezone
import hashlib
import random
import string
from django.conf import settings
SALT = getattr(settings, "EMAIL_TOKEN_SALT")
class MyUserManager(BaseUserManager):
    """Manager creating users keyed on e-mail + username."""

    def create_user(self, username, email, password=None):
        """Create and persist a regular user; raises ValueError on missing fields."""
        if not email:
            raise ValueError('Users must have an email address')
        if not username:
            raise ValueError('Users must have an username')
        # Stamp both join time and last login with the current Django time.
        now = timezone.now()
        user = self.model(
            username=username,
            email=self.normalize_email(email),
            date_joined=now,
            last_login=now,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, username, email, password):
        """Create a regular user, then flag it as admin."""
        user = self.create_user(username, email, password=password)
        user.is_admin = True
        user.save(using=self._db)
        return user
#新版用户表
class Member(AbstractBaseUser):
    """Custom user model; AbstractBaseUser already supplies the
    password, last_login and is_active machinery."""

    # Core identity: log-in is by e-mail, display name is the username.
    email = models.EmailField(verbose_name='邮箱', max_length=255, unique=True,)
    username = models.CharField(verbose_name="用户名", max_length=16, unique=True)
    # Optional public profile data.
    weibo_id = models.CharField(verbose_name="新浪微博", max_length=30, blank=True)
    blog = models.CharField(verbose_name="个人网站", max_length=200, blank=True)
    location = models.CharField(verbose_name="城市", max_length=10, blank=True)
    profile = models.CharField(verbose_name="个人简介", max_length=140, blank=True)
    avatar = models.CharField(verbose_name="头像", max_length=128, blank=True)
    # Activity / bookkeeping counters.
    au = models.IntegerField(verbose_name="用户活跃度", default=0)
    last_ip = models.GenericIPAddressField(verbose_name="上次访问IP", default="0.0.0.0")
    email_verified = models.BooleanField(verbose_name="邮箱是否验证", default=False)
    date_joined = models.DateTimeField(verbose_name="用户注册时间", default=timezone.now)
    topic_num = models.IntegerField(verbose_name="帖子数", default=0)
    comment_num = models.IntegerField(verbose_name="评论数", default=0)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)

    # Manager handling all ORM access (create_user / create_superuser above).
    objects = MyUserManager()

    # Authenticate with the e-mail address; username is still required at creation.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']

    def __str__(self):
        return self.username

    def is_email_verified(self):
        # Template helper: has the e-mail address been confirmed?
        # (Methods are called without parentheses inside templates.)
        return self.email_verified

    def get_weibo(self):
        return self.weibo_id

    def get_username(self):
        return self.username

    def get_email(self):
        return self.email

    def get_full_name(self):
        # The user is identified by their email address
        return self.email

    def get_short_name(self):
        # Would normally return first_name; this model uses username instead.
        return self.username

    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        return True

    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        return True

    def calculate_au(self):
        """
        Recompute the activity score.
        Formula: Topic * 5 + Comment * 1
        """
        self.au = self.topic_num * 5 + self.comment_num * 1
        return self.au

    @property
    # Exposed as an attribute so templates/admin can read `user.is_staff`.
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin
class Follower(models.Model):
    """Directed follow relation: user_b is the follower of user_a
    (user_a is followed by user_b)."""

    user_a = models.ForeignKey(Member, related_name="user_a", verbose_name='偶像')
    user_b = models.ForeignKey(Member, related_name="user_b", verbose_name='粉丝')
    date_followed = models.DateTimeField(default=timezone.now, verbose_name='关注时间')

    class Meta:
        # A user can follow a given user at most once.
        unique_together = ('user_a', 'user_b')

    def __str__(self):
        return "%s following %s" % (self.user_b, self.user_a)
class EmailVerified(models.Model):
    """Per-user token used to confirm an e-mail address."""

    user = models.OneToOneField(Member, related_name="user")
    token = models.CharField("Email 验证 token", max_length=32, default=None)
    timestamp = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return "%s@%s" % (self.user, self.token)

    def generate_token(self):
        """MD5 hex digest of a salted random string plus the creation date."""
        stamp = self.timestamp
        date = "%s-%s-%s" % (stamp.year, stamp.month, stamp.day)
        return hashlib.md5((self.ran_str() + date).encode('utf-8')).hexdigest()

    def ran_str(self):
        """8 random alphanumerics followed by the site-wide e-mail token salt."""
        salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))
        return salt + SALT
class FindPass(models.Model):
    """Per-user token used for the password-recovery flow."""

    user = models.OneToOneField(Member, verbose_name="用户")
    token = models.CharField(max_length=32, blank=True)
    timestamp = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return "%s@%s" % (self.user, self.token)

    def generate_token(self):
        """MD5 hex digest of a salted random string plus the creation date."""
        stamp = self.timestamp
        date = "%s-%s-%s" % (stamp.year, stamp.month, stamp.day)
        return hashlib.md5((self.ran_str() + date).encode('utf-8')).hexdigest()

    def ran_str(self):
        """8 random alphanumerics followed by the site-wide e-mail token salt."""
        salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))
        return salt + SALT
| [
"[email protected]"
] | |
954e1a81cae9daf62bf9cb9cf0f83299c3e8a038 | 8b942cbd6a0da0a61f68c468956ba318c7f1603d | /dynamic_programming/0053_maximum_subarray.py | 4ed3786dd1ebedf430bdbe2dfaceed01c1a79c9e | [
"MIT"
class Solution:
    def maxSubArray(self, nums):
        """
        Kadane's algorithm: track the best subarray sum ending at the current
        index and the best seen overall. O(n) time, O(1) extra space — the
        original kept a full dp list and re-scanned it with max(dp) at the end.
        :type nums: List[int]  (assumed non-empty, as in the original)
        :rtype: int
        """
        best = current = nums[0]
        for value in nums[1:]:
            # Either extend the running subarray or restart at this element.
            current = max(value, current + value)
            best = max(best, current)
        return best
"[email protected]"
] | |
a399301c523887d5bcc02002c2d2c1ac09e638a1 | 07cf86733b110a13224ef91e94ea5862a8f5d0d5 | /permutations/permutations.py | 9adc376b32f889d512681c06e31fc88b05902f97 | [] | no_license | karsevar/Code_Challenge_Practice | 2d96964ed2601b3beb324d08dd3692c3d566b223 | 88d4587041a76cfd539c0698771420974ffaf60b | refs/heads/master | 2023-01-23T17:20:33.967020 | 2020-12-14T18:29:49 | 2020-12-14T18:29:49 | 261,813,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,179 | py | # First attempt most test cases didn't pass. Perhaps I miss read the permutation
# requirements for this problem.
class Solution:
    """Corrected first attempt: the original's adjacency/exclusion rules
    silently dropped valid permutations (the author noted most test cases
    failed). Replaced with standard choose-and-recurse backtracking."""

    def permute(self, nums):
        """
        Return all permutations of nums (elements assumed distinct).
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        permutations = []

        def _backtrack(current, remaining):
            # When nothing remains to place, `current` is a full permutation.
            if not remaining:
                permutations.append(current)
                return
            for i, value in enumerate(remaining):
                _backtrack(current + [value], remaining[:i] + remaining[i + 1:])

        _backtrack([], nums)
        return permutations
class OfficialSolution:
    def permute(self, nums):
        """
        Return all permutations of nums via in-place swap backtracking:
        fix position `index`, try every remaining element there, recurse,
        then swap back to restore the list.
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        permutations = []

        def _helper(index, perm):
            # A full prefix of length len(perm) is a complete permutation.
            if index == len(perm):
                permutations.append(list(perm))
            # Fix: removed the leftover debug print() calls that polluted
            # stdout on every recursive step, and the unused length param.
            for i in range(index, len(perm)):
                perm[index], perm[i] = perm[i], perm[index]
                _helper(index + 1, perm)
                perm[index], perm[i] = perm[i], perm[index]

        _helper(0, nums)
        return permutations
588ff9f9d1fd2b83d89b92f998ad98b57b5b6142 | ec513ac551fc0bbb6c8af5b30330445bf52c6c7f | /location_monitor/src/location_monitor_node.py | e907ab747f1eb280bbd66076673f3279e2518249 | [] | no_license | ChuChuIgbokwe/me495_tutorials | b88c4833f35e50b51a4ccaa1a4bae5a1916e12bf | b03e74605cf469d818c4533f3d563622e7d14552 | refs/heads/master | 2020-04-06T07:06:08.360123 | 2016-09-18T08:46:01 | 2016-09-18T08:46:01 | 64,951,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | #!/usr/bin/env python
# # -*- coding: utf-8 -*-
# #Created by Chukwunyere Igbokwe on July 27, 2016 by 2:23 PM
# import rospy
# import math
# from nav_msgs.msg import Odometry
# from location_monitor.msg import LandmarkDistance
# def distance(x1, y1, x2, y2):
# xd = x1 - x2
# yd = y1 - y2
# return math.sqrt(xd*xd + yd*yd)
# class LandmarkMonitor(object):
# def __init__(self,landmark_pub, landmarks):
# self._landmark_pub = landmark_pub
# self._landmarks = landmarks
# def callback(self,msg):
# x = msg.pose.pose.position.x
# y = msg.pose.pose.position.y
# # rospy.loginfo("x: {}, y: {}".format(x,y))
# closest_name = None
# closest_distance = None
# for l_name,l_x, l_y in self._landmarks:
# dist = distance(x, y, l_x, l_y)
# if closest_distance is None or dist < closest_distance:
# closest_name = l_name
# closest_distance = dist
# ld = LandmarkDistance()
# ld.name = closest_name
# ld.distance = closest_distance
# self._landmark_pub.publish(ld)
# if closest_distance < 0.5:
# rospy.loginfo("I'm near the {}".format(closest_name))
# # rospy.loginfo("closest : {}".format(closest_name))
# def main():
# rospy.init_node('location_monitor_node')
# landmarks = []
# landmarks.append(("Cube", 0.31, -0.99));
# landmarks.append(("Dumpster", 0.11, -2.42));
# landmarks.append(("Cylinder", -1.14, -2.88));
# landmarks.append(("Barrier", -2.59, -0.83));
# landmarks.append(("Bookshelf", -0.09, 0.53));
# landmark_pub = rospy.Publisher("closest_landmark", LandmarkDistance, queue_size=10)
# monitor = LandmarkMonitor(landmark_pub,landmarks)
# rospy.Subscriber("/odom", Odometry, monitor.callback)
# try:
# rospy.spin()
# except KeyboardInterrupt:
# print("Shutting down")
# if __name__ == '__main__':
# main()
#your python node and package/message should always have different names
import rospy
from nav_msgs.msg import Odometry
import math
# Known landmark positions as (name, x, y) tuples in the map frame.
landmarks = [
    ("Cube", 0.31, -0.99),
    ("Dumpster", 0.11, -2.42),
    ("Cylinder", -1.14, -2.88),
    ("Barrier", -2.59, -0.83),
    ("Bookshelf", -0.09, 0.53),
]
def distance(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    # math.hypot is the stdlib idiom for sqrt(dx*dx + dy*dy) and is more
    # robust against intermediate overflow/underflow.
    return math.hypot(x1 - x2, y1 - y2)
def callback(msg):
    """Log the landmark closest to the robot's current /odom position."""
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y
    closest_name = None
    closest_distance = None
    for name, lx, ly in landmarks:
        d = distance(x, y, lx, ly)
        if closest_distance is None or d < closest_distance:
            closest_name, closest_distance = name, d
    rospy.loginfo("Landmark: {} || Distance: {}".format(closest_name, closest_distance))
def main():
    """Initialise the ROS node and process /odom updates until shutdown."""
    rospy.init_node('location_monitor')
    rospy.Subscriber("/odom", Odometry, callback)
    rospy.spin()
if __name__ == '__main__':
    # Start the node only when executed as a script, not on import.
    main()
| [
"[email protected]"
] | |
202384744bc82b1b11a8752e20a41b61b8c14117 | 30ab9750e6ca334941934d1727c85ad59e6b9c8a | /zentral/contrib/monolith/management/commands/rebuild_manifest_enrollment_packages.py | 4311863e71bf807a69d5cdb8a2dda5713092f8ef | [
"Apache-2.0"
] | permissive | ankurvaishley/zentral | 57e7961db65278a0e614975e484927f0391eeadd | a54769f18305c3fc71bae678ed823524aaa8bb06 | refs/heads/main | 2023-05-31T02:56:40.309854 | 2021-07-01T07:51:31 | 2021-07-01T14:15:34 | 382,346,360 | 1 | 0 | Apache-2.0 | 2021-07-02T12:55:47 | 2021-07-02T12:55:47 | null | UTF-8 | Python | false | false | 484 | py | from django.core.management.base import BaseCommand
from zentral.contrib.monolith.models import ManifestEnrollmentPackage
from zentral.contrib.monolith.utils import build_manifest_enrollment_package
class Command(BaseCommand):
    """Management command: regenerate every manifest enrollment package."""

    help = 'Rebuild monolith manifest enrollment packages.'

    def handle(self, *args, **kwargs):
        # Rebuild each stored package and report the regenerated file path.
        for package in ManifestEnrollmentPackage.objects.all():
            build_manifest_enrollment_package(package)
            print(package.file.path, "rebuilt")
| [
"[email protected]"
] | |
bdbf224d07f9a5aeceb878a2ff696537cb9fd117 | 3633bab8066f576c8bf9e7908afe30bb070d0b70 | /Hack-tenth-week/cinema/website/management/commands/populate_db.py | f9afe0316b2236021528fb773fea671a5c9bdfe8 | [] | no_license | 6desislava6/Hack-Bulgaria | 099c195e45a443cf4a3342eff6612ac2aa66565b | de4bf7baae35e21d6a7b27d4bde68247bb85b67a | refs/heads/master | 2021-01-20T11:57:29.027595 | 2015-06-02T17:36:59 | 2015-06-02T17:36:59 | 32,828,816 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | from django.core.management.base import BaseCommand
from website.models import Movie, Projection, Reservation
class Command(BaseCommand):
    """Seed the cinema database with demo movies, projections and reservations."""

    def _add_movies(self):
        # Same seven movies (same order) as the original one-call-per-line version.
        for name, rating in (
            ('The Green Mile', 9.0),
            ('Stay Alive', 6.0),
            ('Twenty-Seven Dresses', 5.0),
            ('Inception', 9.0),
            ('The Hunger Games: Catching Fire', 7.9),
            ('Wreck-It Ralph', 7.8),
            ('Her', 8.3),
        ):
            Movie.add_movie(name=name, rating=rating)

    def _delete_movies(self):
        # Not called by handle(); kept for manual cleanup.
        Movie.objects.all().delete()

    def _delete_projections(self):
        # Not called by handle(); kept for manual cleanup.
        Projection.objects.all().delete()

    def _add_projections(self):
        # One 3D projection per movie, all on the same date/time.
        for title in (
            'The Green Mile',
            'Stay Alive',
            'Twenty-Seven Dresses',
            'Inception',
            'The Hunger Games: Catching Fire',
            'Wreck-It Ralph',
        ):
            Projection.add_projection(movie=Movie.objects.get(name=title),
                                      type_projection='3D',
                                      date='2015-05-19', time='18:00')

    def _add_reservations(self):
        Reservation.add_reservation(username='desi', row='1', col='1',
                                    projection=Projection.objects.get(movie__name='The Green Mile'))
        Reservation.add_reservation(username='marmot', row='1', col='1',
                                    projection=Projection.objects.get(movie__name='Inception'))

    def handle(self, *args, **options):
        self._add_movies()
        self._add_projections()
        self._add_reservations()
| [
"[email protected]"
] | |
c084bf927837edbff9f1738b44a08d195446fec2 | 35fa8925e63f2b0f62ef6bfc1ff4e03cf42bd923 | /tests/models/output/definitions/test_output_definition.py | dc3f2047a98052437876efa3ed6a308349469e6b | [
"Apache-2.0"
] | permissive | TheLabbingProject/django_analyses | 9e6f8b9bd2a84e8efe6dda6a15de6a3ecdf48ec1 | 5642579660fd09dde4a23bf02ec98a7ec264bceb | refs/heads/master | 2023-02-26T07:53:53.142552 | 2023-02-17T08:12:17 | 2023-02-17T08:12:17 | 225,623,958 | 1 | 2 | Apache-2.0 | 2023-02-17T08:12:18 | 2019-12-03T13:15:29 | Python | UTF-8 | Python | false | false | 5,861 | py | from django.core.exceptions import ValidationError
from django.test import TestCase
from django_analyses.models.input.definitions.file_input_definition import \
FileInputDefinition
from django_analyses.models.managers.output_definition import \
OutputDefinitionManager
from django_analyses.models.output.definitions.output_definition import \
OutputDefinition
from django_analyses.models.output.types.file_output import FileOutput
from tests.factories.output.definitions.output_definition import \
OutputDefinitionFactory
class OutputDefinitionTestCase(TestCase):
"""
Tests for the
:class:`~django_analyses.models.output.definitions.output_definition.OutputDefinition`
model.
"""
def setUp(self):
"""
Adds the created instances to the tests' contexts.
For more information see unittest's :meth:`~unittest.TestCase.setUp` method.
"""
self.output_definition = OutputDefinitionFactory()
##########
# Meta #
##########
def test_ordering(self):
"""
Test the `ordering`.
"""
self.assertTupleEqual(OutputDefinition._meta.ordering, ("key",))
def test_output_class_is_none(self):
"""
Tests that the *output_class* class attribute is set to None. This is
meant to be overriden by a
:class:`~django_analyses.models.output.output.Output` instance.
"""
self.assertIsNone(OutputDefinition.output_class)
def test_custom_manager_is_assigned(self):
"""
Tests that the manager is assigned to be the custom
:class:`~django_analyses.models.managers.output_definition.OutputDefinitionManager`
class.
"""
self.assertIsInstance(OutputDefinition.objects, OutputDefinitionManager)
##########
# Fields #
##########
# key
def test_key_max_length(self):
"""
Test the max_length of the *key* field.
"""
field = self.output_definition._meta.get_field("key")
self.assertEqual(field.max_length, 50)
def test_key_is_not_unique(self):
"""
Tests that the *key* field is not unique.
"""
field = self.output_definition._meta.get_field("key")
self.assertFalse(field.unique)
def test_key_blank_and_null(self):
"""
Tests that the *key* field may not be blank or null.
"""
field = self.output_definition._meta.get_field("key")
self.assertFalse(field.blank)
self.assertFalse(field.null)
# description
def test_description_blank_and_null(self):
"""
Tests that the *description* field may be blank or null.
"""
field = self.output_definition._meta.get_field("description")
self.assertTrue(field.blank)
self.assertTrue(field.null)
###########
# Methods #
###########
def test_string(self):
"""
Test the string output.
"""
value = str(self.output_definition)
expected = self.output_definition.key
self.assertEqual(value, expected)
def test_create_output_instance_raises_type_error(self):
"""
Tests that calling the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.create_output_instance`
raises a ValidationError. This is the expected behavior as long as the
output_class attribute is not defined (or ill defined).
"""
with self.assertRaises(ValidationError):
self.output_definition.create_output_instance()
def test_create_output_instance_with_non_model_value_raises_type_error(self):
"""
Tests that calling the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.create_output_instance`
with a non-model value raises a ValidationError.
"""
self.output_definition.output_class = str
with self.assertRaises(ValidationError):
self.output_definition.create_output_instance()
def test_create_output_instance_with_non_output_subclass_value_raises_type_error(
self,
):
"""
Tests that calling the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.create_output_instance`
with a non-:class:`~django_analyses.models.output.output.Output`
model subclass value raises a ValidationError.
"""
self.output_definition.output_class = FileInputDefinition
with self.assertRaises(ValidationError):
self.output_definition.check_output_class_definition()
def test_resetting_output_class_to_valid_output_subclass(self):
"""
Tests that the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.check_output_class_definition`
method does not raise a ValidationError when setting *output_class* to
some valid Output model subclass.
"""
self.output_definition.output_class = FileOutput
try:
self.output_definition.check_output_class_definition()
except ValidationError:
self.fail(
"Failed to set output_definition output_class to a valid Output subclass!"
)
def test_create_output_instance_reraises_uncaught_exception(self):
"""
Tests that calling the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.create_output_instance`
method when *output_class* is properly set but invalid kwargs still
raises an exception.
"""
self.output_definition.output_class = FileOutput
with self.assertRaises(ValueError):
self.output_definition.create_output_instance()
| [
"[email protected]"
] | |
af9bf4858b5793e1641a6963e2f7e683b1de3f12 | 1adc548f1865c0e4fcb3b3ff1049789fa0c72b12 | /tests/observes/test_column_property.py | 058383a5651f5433d39e0d4606bda3d52d6f5663 | [] | no_license | wujuguang/sqlalchemy-utils | ca826a81acdc70168e0b85820aaf8fe1604d6b0a | b6871980a412f2ebd16ec08be3127814b42ba64e | refs/heads/master | 2021-01-12T20:59:48.692539 | 2016-01-15T08:06:48 | 2016-01-18T18:52:12 | 48,418,840 | 0 | 0 | null | 2015-12-22T08:05:48 | 2015-12-22T08:05:47 | null | UTF-8 | Python | false | false | 1,582 | py | import sqlalchemy as sa
from pytest import raises
from sqlalchemy_utils.observer import observes
from tests import TestCase
class TestObservesForColumn(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
price = sa.Column(sa.Integer)
@observes('price')
def product_price_observer(self, price):
self.price = price * 2
self.Product = Product
def test_simple_insert(self):
product = self.Product(price=100)
self.session.add(product)
self.session.flush()
assert product.price == 200
class TestObservesForColumnWithoutActualChanges(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
price = sa.Column(sa.Integer)
@observes('price')
def product_price_observer(self, price):
raise Exception('Trying to change price')
self.Product = Product
def test_only_notifies_observer_on_actual_changes(self):
product = self.Product()
self.session.add(product)
self.session.flush()
with raises(Exception) as e:
product.price = 500
self.session.commit()
assert str(e.value) == 'Trying to change price'
| [
"[email protected]"
] | |
c2e9ac93f8629983cb977f8a65caf9dee5bfceaa | 80760d4c8a6b2c45b4b529bdd98d33c9c5509438 | /Practice/atcoder/ABC/054/src/c2.py | 007ef8de5fd091ec21679eb96f94eb5ea1f9c5f2 | [] | no_license | prrn-pg/Shojin | f1f46f8df932df0be90082b475ec02b52ddd882e | 3a20f1122d8bf7d95d9ecd205a62fc36168953d2 | refs/heads/master | 2022-12-30T22:26:41.020473 | 2020-10-17T13:53:52 | 2020-10-17T13:53:52 | 93,830,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # 全域木?っていうんだっけ?でもコストは関係ないか
# 適当に隣接リストでもってしてDFSする
N, M = map(int, input().split())
Neighbor_list = [[] for _ in range(N)]
for _ in range(M):
s, t = map(int, input().split())
Neighbor_list[s-1].append(t-1)
Neighbor_list[t-1].append(s-1)
def dfs(cur, path):
if len(path) == N:
return 1
else:
ret = 0
for neighbor in Neighbor_list[cur]:
if neighbor not in path:
next_list = path[:]
next_list.append(neighbor)
ret += dfs(neighbor, next_list)
return ret
print(dfs(0, [0]))
| [
"[email protected]"
] | |
cd53fdab752cc6628b086d089002c796748479b8 | e09bbdc53af6be9281795189f26f6e59997abf68 | /tests/test_forex.py | eeb783520060d238446a4a97fba67b6f1d7c96a9 | [
"Apache-2.0"
] | permissive | jag787/ppQuanTrade | 620ce72c7875bb730708c48ae0481376b43e501b | 9a6da7522d281da130a2c459e2e614a75daa543d | refs/heads/master | 2021-01-11T13:53:40.583710 | 2013-12-20T10:43:58 | 2013-12-20T10:43:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,100 | py | #
# Copyright 2013 Xavier Bruhiere
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Tests for the forex datasource
'''
from unittest import TestCase
from nose.tools import timed
from neuronquant.data.forex import ConnectTrueFX
#from neuronquant.utils.datautils import FX_PAIRS
DEFAULT_TIMEOUT = 15
EXTENDED_TIMEOUT = 90
class TestForex(TestCase):
'''
Forex access through TrueFX provider
!! Beware that truefx server will return empty array
if currencies were not updated since last call
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_connection_credentials(self):
'''
Use explicit TrueFx username and password account for
authentification
'''
client = ConnectTrueFX(user='Gusabi', password='quantrade')
# If succeeded, an authentification for further use was returned by
# truefx server
assert client
assert client._code
assert client._code.find('Gusabi') == 0
def test_connection_default_auth_file(self):
'''
If no credentials, the constructor tries to find it
reading config/default.json
'''
# It's default behavior, nothing to specifie
client = ConnectTrueFX()
assert client
assert client._code
assert client._code.find('Gusabi') == 0
def test_connection_custom_auth_file(self):
'''
If no credentials, the constructor tries to find it
reading given json file
'''
client = ConnectTrueFX(auth_file='plugins.json')
assert client
assert client._code
assert client._code.find('Gusabi') == 0
def test_connection_without_auth(self):
''' TrueFX API can be used without credentials in a limited mode '''
#FIXME Fails to retrieve limited values
client = ConnectTrueFX(user=None, password=None, auth_file='fake.json')
assert client._code == 'not authorized'
def test_connection_with_pairs(self):
pairs = ['EUR/USD', 'USD/JPY']
client = ConnectTrueFX(pairs=pairs)
### Default call use pairs given during connection
dataframe = client.QueryTrueFX()
for p in pairs:
assert p in dataframe.columns
@timed(DEFAULT_TIMEOUT)
def test_query_default(self):
pass
def test_query_format(self):
pass
def test_query_pairs(self):
pass
def test_response_formating(self):
pass
def test_detect_active(self):
pass
def test_standalone_request(self):
pass
| [
"[email protected]"
] | |
e32fadc710671ee0d561a5192a3e0c6875072673 | ac7e039a70ba627f6d9a7a02c9a8849ed5e18a89 | /unep.project-database/tags/0.2/content/Project.py | d13c620db2288f39c6b8598a0df372dc144dd473 | [] | no_license | jean/project-database | 65a2559844175350351ba87e820d25c3037b5fb2 | e818d322ec11d950f2770cd5324fbcd1acaa734d | refs/heads/master | 2021-01-01T06:27:24.528764 | 2014-01-31T11:11:45 | 2014-01-31T11:11:45 | 32,125,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,359 | py | # -*- coding: utf-8 -*-
#
# File: Project.py
#
# Copyright (c) 2008 by []
# Generator: ArchGenXML Version 2.0
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
__author__ = """Jean Jordaan <[email protected]>, Jurgen Blignaut
<[email protected]>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from zope.interface import implements
import interfaces
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.ATVocabularyManager.namedvocabulary import NamedVocabulary
from Products.ProjectDatabase.config import *
# additional imports from tagged value 'import'
from Products.ProjectDatabase.widgets.SelectedLinesField import SelectedLinesField
from Products.CMFCore.utils import getToolByName
from Products.FinanceFields.MoneyField import MoneyField
from Products.FinanceFields.MoneyWidget import MoneyWidget
from Products.DataGridField import DataGridField, DataGridWidget, Column, SelectColumn, CalendarColumn
from Products.ATReferenceBrowserWidget.ATReferenceBrowserWidget import ReferenceBrowserWidget
import Project
import Financials
from Products.CMFCore.utils import getToolByName
from Products.FinanceFields.Money import Money
##code-section module-header #fill in your manual code here
del Project
from Products.ProjectDatabase.content.FMIFolder import FMIFolder
from Products.ProjectDatabase.content.MonitoringAndEvaluation import MonitoringAndEvaluation
from Products.ProjectDatabase.content.ProjectGeneralInformation import ProjectGeneralInformation
from Products.ProjectDatabase.content.MilestoneFolder import MilestoneFolder
import permissions
##/code-section module-header
schema = Schema((
),
)
##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema
Project_schema = BaseFolderSchema.copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
##/code-section after-schema
class Project(BaseFolder, BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IProject)
meta_type = 'Project'
_at_rename_after_creation = True
schema = Project_schema
##code-section class-header #fill in your manual code here
##/code-section class-header
# Methods
security.declarePublic('getLeadAgencies')
def getLeadAgencies(self):
"""
"""
catalog = getToolByName(self, 'portal_catalog')
proxies = catalog(portal_type='Agency')
pl = [p.getObject().Title() for p in proxies]
return ','.join(pl)
security.declarePublic('getVocabulary')
def getVocabulary(self, vocabName):
"""
"""
pv_tool = getToolByName(self, 'portal_vocabularies')
vocab = pv_tool.getVocabularyByName(vocabName)
return vocab.getDisplayList(vocab)
security.declarePublic('getProjectGeneralInformation')
def getProjectGeneralInformation(self):
"""
"""
return self['project_general_info']
security.declarePublic('getAProject')
def getAProject(self):
"""
"""
return self
registerType(Project, PROJECTNAME)
# end of class Project
##code-section module-footer #fill in your manual code here
##/code-section module-footer
| [
"jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d"
] | jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d |
66fa92e9025251b90129308bd92a3f521649690c | 753a70bc416e8dced2853f278b08ef60cdb3c768 | /models/research/domain_adaptation/domain_separation/dsn_test.py | 3d687398a9b9356455f739417bc96ddb2ca5ad40 | [
"MIT",
"Apache-2.0"
] | permissive | finnickniu/tensorflow_object_detection_tflite | ef94158e5350613590641880cb3c1062f7dd0efb | a115d918f6894a69586174653172be0b5d1de952 | refs/heads/master | 2023-04-06T04:59:24.985923 | 2022-09-20T16:29:08 | 2022-09-20T16:29:08 | 230,891,552 | 60 | 19 | MIT | 2023-03-25T00:31:18 | 2019-12-30T09:58:41 | C++ | UTF-8 | Python | false | false | 6,027 | py | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DSN model assembly functions."""
import numpy as np
import tensorflow as tf
import dsn
class HelperFunctionsTest(tf.test.TestCase):
def testBasicDomainSeparationStartPoint(self):
with self.test_session() as sess:
# Test for when global_step < domain_separation_startpoint
step = tf.contrib.slim.get_or_create_global_step()
sess.run(tf.global_variables_initializer()) # global_step = 0
params = {'domain_separation_startpoint': 2}
weight = dsn.dsn_loss_coefficient(params)
weight_np = sess.run(weight)
self.assertAlmostEqual(weight_np, 1e-10)
step_op = tf.assign_add(step, 1)
step_np = sess.run(step_op) # global_step = 1
weight = dsn.dsn_loss_coefficient(params)
weight_np = sess.run(weight)
self.assertAlmostEqual(weight_np, 1e-10)
# Test for when global_step >= domain_separation_startpoint
step_np = sess.run(step_op) # global_step = 2
tf.logging.info(step_np)
weight = dsn.dsn_loss_coefficient(params)
weight_np = sess.run(weight)
self.assertAlmostEqual(weight_np, 1.0)
class DsnModelAssemblyTest(tf.test.TestCase):
def _testBuildDefaultModel(self):
images = tf.to_float(np.random.rand(32, 28, 28, 1))
labels = {}
labels['classes'] = tf.one_hot(
tf.to_int32(np.random.randint(0, 9, (32))), 10)
params = {
'use_separation': True,
'layers_to_regularize': 'fc3',
'weight_decay': 0.0,
'ps_tasks': 1,
'domain_separation_startpoint': 1,
'alpha_weight': 1,
'beta_weight': 1,
'gamma_weight': 1,
'recon_loss_name': 'sum_of_squares',
'decoder_name': 'small_decoder',
'encoder_name': 'default_encoder',
}
return images, labels, params
def testBuildModelDann(self):
images, labels, params = self._testBuildDefaultModel()
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'dann_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelDannSumOfPairwiseSquares(self):
images, labels, params = self._testBuildDefaultModel()
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'dann_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelDannMultiPSTasks(self):
images, labels, params = self._testBuildDefaultModel()
params['ps_tasks'] = 10
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'dann_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelMmd(self):
images, labels, params = self._testBuildDefaultModel()
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'mmd_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelCorr(self):
images, labels, params = self._testBuildDefaultModel()
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'correlation_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelNoDomainAdaptation(self):
images, labels, params = self._testBuildDefaultModel()
params['use_separation'] = False
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels, 'none',
params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 1)
self.assertEqual(len(tf.contrib.losses.get_regularization_losses()), 0)
def testBuildModelNoAdaptationWeightDecay(self):
images, labels, params = self._testBuildDefaultModel()
params['use_separation'] = False
params['weight_decay'] = 1e-5
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels, 'none',
params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 1)
self.assertTrue(len(tf.contrib.losses.get_regularization_losses()) >= 1)
def testBuildModelNoSeparation(self):
images, labels, params = self._testBuildDefaultModel()
params['use_separation'] = False
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'dann_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 2)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
310a2ff7d5c25b08fd026424c91c406d6dce04a7 | 8e4a5e0a81fc9401fc0b6e55dd55e8d6e29c3ed6 | /PycharmProjects/licamb/licamb/db.py | 56e07023c14dd0a9ab4cc3e86d345f33321735e3 | [] | no_license | rogeriodelphi/portifolio | 1fb16c8c723b97f20cdd305224b660a1657f3913 | 5c704305ce26576afb4efd1e410f691971f06fac | refs/heads/master | 2023-08-11T05:33:37.539047 | 2021-09-26T01:57:02 | 2021-09-26T01:57:02 | 284,164,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SQLITE = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
POSTGRESQL = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'db',
'USER': 'postgres',
'PASSWORD': '123456',
'HOST': 'localhost',
'PORT': '5432',
}
}
| [
"[email protected]"
] | |
e610e2ff68b9264be3b2f2e6659c8a516cad7e27 | eb136fec7f6dfcb11834cc0cd4d3daec1d7a4dc6 | /fiasco_api/expenses/migrations/0001_initial.py | 40ad410a30d5561dfacbc245e35bd26e587ef388 | [
"MIT"
] | permissive | xelnod/fiasco_backend | 4635cff2fd220585c4433010e64208dfebbf2441 | edeca8cac8c7b1a1cc53051d4443cc2996eba37c | refs/heads/master | 2020-09-21T13:37:37.971952 | 2020-09-15T19:38:37 | 2020-09-15T19:38:37 | 224,804,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,396 | py | # Generated by Django 3.1.1 on 2020-09-13 21:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('categories', '0001_initial'),
('channels', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ExpenseProto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('comment', models.TextField(blank=True, null=True)),
('amount', models.IntegerField(default=0)),
('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='channels.channel')),
('kit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='categories.kit')),
],
),
migrations.CreateModel(
name='Expense',
fields=[
('expenseproto_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='expenses.expenseproto')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_fulfilled', models.BooleanField(default=True)),
('money_stored', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
bases=('expenses.expenseproto', models.Model),
),
migrations.CreateModel(
name='OngoingExpense',
fields=[
('expenseproto_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='expenses.expenseproto')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('scope', models.IntegerField(choices=[(0, 'Month'), (1, 'Year')], default=0)),
],
options={
'abstract': False,
},
bases=('expenses.expenseproto', models.Model),
),
]
| [
"[email protected]"
] | |
895a6ff291a61e66f00fd311bf599cf8fdb80ba1 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dataartsstudio/huaweicloudsdkdataartsstudio/v1/model/list_workspaceusers_request.py | 03fe5cb9797831a7a33f080679f078e6c5bedd22 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,633 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListWorkspaceusersRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'workspace_id': 'str',
'limit': 'str',
'offset': 'str'
}
attribute_map = {
'workspace_id': 'workspace_id',
'limit': 'limit',
'offset': 'offset'
}
def __init__(self, workspace_id=None, limit=None, offset=None):
"""ListWorkspaceusersRequest
The model defined in huaweicloud sdk
:param workspace_id: 工作空间id
:type workspace_id: str
:param limit: 数据条数限制
:type limit: str
:param offset: 偏移量
:type offset: str
"""
self._workspace_id = None
self._limit = None
self._offset = None
self.discriminator = None
self.workspace_id = workspace_id
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
@property
def workspace_id(self):
"""Gets the workspace_id of this ListWorkspaceusersRequest.
工作空间id
:return: The workspace_id of this ListWorkspaceusersRequest.
:rtype: str
"""
return self._workspace_id
@workspace_id.setter
def workspace_id(self, workspace_id):
"""Sets the workspace_id of this ListWorkspaceusersRequest.
工作空间id
:param workspace_id: The workspace_id of this ListWorkspaceusersRequest.
:type workspace_id: str
"""
self._workspace_id = workspace_id
@property
def limit(self):
"""Gets the limit of this ListWorkspaceusersRequest.
数据条数限制
:return: The limit of this ListWorkspaceusersRequest.
:rtype: str
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListWorkspaceusersRequest.
数据条数限制
:param limit: The limit of this ListWorkspaceusersRequest.
:type limit: str
"""
self._limit = limit
@property
def offset(self):
"""Gets the offset of this ListWorkspaceusersRequest.
偏移量
:return: The offset of this ListWorkspaceusersRequest.
:rtype: str
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListWorkspaceusersRequest.
偏移量
:param offset: The offset of this ListWorkspaceusersRequest.
:type offset: str
"""
self._offset = offset
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListWorkspaceusersRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
b5bfc185e3c0e76fb33a254d444155ab0931f2c8 | f723b36a64d7c5ccd2a4937d02f05279fc9e907c | /calls/urls.py | 48317b35fa4b2d6bacf2ee72c3c3734774b5c08e | [] | no_license | DmitrySham/grand-django-site | 92259098d209954ee5f5c994989f6c1f7c9826f4 | e65988c441e9fb37fd15126d28301c47643b501d | refs/heads/master | 2023-01-22T08:37:08.921212 | 2023-01-13T15:05:30 | 2023-01-13T15:05:30 | 184,014,992 | 0 | 0 | null | 2022-12-04T20:45:03 | 2019-04-29T06:44:37 | JavaScript | UTF-8 | Python | false | false | 145 | py | from django.urls import path
from calls import views
urlpatterns = [
path('ajax/call/request/', views.call_request, name='calls_request')
]
| [
"[email protected]"
] | |
86b082d38e2f308f0a9eb3f9b74eb82523828273 | b478d1e63cce432b6fd3692c0aa7a84f411ae9dc | /meta_py3/main.py | b2fcdb9da12e44315b927e032eb6c0442104b5d4 | [] | no_license | yiqing95/py_study | 8d414aa00b4ac31070fe5667a98815980eee46d0 | 6ce6b46ad729a795bc9253d6339169e62ef47766 | refs/heads/master | 2016-09-06T17:45:26.081269 | 2015-01-12T15:22:29 | 2015-01-12T15:22:29 | 20,810,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from meta_py3 import example2
__author__ = 'yiqing'
from meta_py3.example import *
from meta_py3.helper import printHr
p = Point(3,4)
print(p.x)
printHr()
obj = example2.MyClass(3)
print(obj.x)
| [
"[email protected]"
] | |
17e37b200e4daabdb7bde731b5f7ece860ff30f5 | 9f440599da392a55d7d5b2b7ce571bc3f2dc881e | /rhea/cores/usbext/fpgalink/__init__.py | 40502351eaf29688fab9e182e67fd1cd214d5167 | [
"MIT"
] | permissive | zignig/rhea | 713559f688f85e1304ab43c2b871553da3bf01ae | e0d04ff4fcbd57dfeb6f84fa8f87d6b03caee590 | refs/heads/master | 2020-04-06T06:53:33.541215 | 2016-03-15T12:45:23 | 2016-03-15T12:45:23 | 53,943,632 | 1 | 0 | null | 2016-03-15T12:42:06 | 2016-03-15T12:42:06 | null | UTF-8 | Python | false | false | 196 | py |
from __future__ import absolute_import
from . import _fpgalink_fx2 as fpgalink
from ._fpgalink_fx2 import get_interfaces
from ._fpgalink_fx2 import fpgalink_fx2
from ._fl_convert import convert
| [
"[email protected]"
] | |
92103249322b421545629318572a095a6464b746 | 46bd3e3ba590785cbffed5f044e69f1f9bafbce5 | /env/lib/python3.8/site-packages/supervisor/tests/test_dispatchers.py | 3f88376a16df1a07247d1fe031d2147a0cb4d10c | [] | no_license | adamkluk/casper-getstarted | a6a6263f1547354de0e49ba2f1d57049a5fdec2b | 01e846621b33f54ed3ec9b369e9de3872a97780d | refs/heads/master | 2023-08-13T11:04:05.778228 | 2021-09-19T22:56:59 | 2021-09-19T22:56:59 | 408,036,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:b2039ef9d32ffde70df065c6a333cb150fa31e79786df3f98287dc41938ad1e1
size 53720
| [
"[email protected]"
] | |
7401b94189214c99484961a6a267429cd5e290fb | 19f27f432b968521c7bee497a96f2b01963da293 | /manage.py | 0ff8346ecebe236c0d31d614ad2ceeab700db026 | [] | no_license | ethanlee6/myw | eae3eb751f4b06e06ce1dd2a21adf9272f1bf72f | 74c60ebea5519c18d7495c2ee8064b4a576b9b89 | refs/heads/master | 2021-01-24T18:39:43.481407 | 2017-03-15T12:15:01 | 2017-03-15T12:15:01 | 84,459,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | import os
from flask.ext.script import Manager, Server
from flask.ext.script.commands import ShowUrls
from flask.ext.migrate import Migrate, MigrateCommand
from webapp import create_app
from webapp.models import db, User, Post, Tag, Comment
# default to dev config
env = os.environ.get('WEBAPP_ENV', 'dev')
app = create_app('webapp.config.%sConfig' % env.capitalize())
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("server", Server())
#manager.add_command("show-urls", ShowUrls())
manager.add_command('db', MigrateCommand)
@manager.shell
def make_shell_context():
return dict(
app=app,
db=db,
User=User,
Post=Post,
Tag=Tag,
Comment=Comment
)
if __name__ == "__main__":
manager.run()
| [
"[email protected]"
] | |
bb4411845beac8ed6a855d3894786bb21f41fa05 | 5179b07b8d1a31df18612ce55d35c56b851cead8 | /tools/train.py | b0290aace7813a3edf21acd4895698b235e05300 | [
"Apache-2.0"
] | permissive | hamidehkerdegari/VFS | 3e9c427c4a8ae0a6b66a3a1378bac5c6f9daaf51 | 8e055cc191578706f05b7484facf44be6fb1525a | refs/heads/master | 2023-08-24T09:40:46.678233 | 2021-09-26T18:24:38 | 2021-09-26T18:24:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,658 | py | import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import init_dist, set_random_seed
from mmaction import __version__
from mmaction.apis import train_model
from mmaction.datasets import build_dataset
from mmaction.models import build_model
from mmaction.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a recognizer')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--load-from', help='the checkpoint file to load weights from')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--auto-resume',
action='store_true',
help='automatically resume training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--suffix', type=str, help='work_dir suffix')
parser.add_argument(
'--disable-wandb', action='store_true', help='disable wandb')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
print('cudnn_benchmark=True')
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority:
# CLI > config file > default (base filename)
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.suffix is not None:
cfg.work_dir = f'{cfg.work_dir}-{args.suffix}'
for i, h in enumerate(cfg.log_config.hooks):
if h.type == 'WandbLoggerHook':
if args.disable_wandb:
cfg.log_config.hooks.pop(i)
break
if args.suffix is not None:
wandb_dir = cfg.log_config.hooks[i].init_kwargs.dir
cfg.log_config.hooks[i].init_kwargs.dir = f'{wandb_dir}-' \
f'{args.suffix}'
mmcv.mkdir_or_exist(cfg.log_config.hooks[i].init_kwargs.dir)
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
elif args.auto_resume:
if osp.exists(osp.join(cfg.work_dir, 'latest.pth')):
cfg.resume_from = osp.join(cfg.work_dir, 'latest.pth')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config: {cfg.text}')
logger.info(f'Config.pretty_text: {cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info('Set random seed to {}, deterministic: {}'.format(
args.seed, args.deterministic))
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
logger.info(f'Model: {str(model)}')
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
if args.validate:
warnings.warn('val workflow is duplicated with `--validate`, '
'it is recommended to use `--validate`. see '
'https://github.com/open-mmlab/mmaction2/pull/123')
val_dataset = copy.deepcopy(cfg.data.val)
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmaction version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmaction_version=__version__, config=cfg.text)
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=False,
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8a73c785a44ece6263c3e40dfde840832bed6655 | 65c03709b91ce8f006641b30d481b4fda651520e | /Coding/3_indexing_slicing.py | a52c46b665b5ac657b828965eb9a307d71a3bd84 | [] | no_license | ahad-emu/python-code | 332121ad289b169ca8099c88bde13d7121be1030 | 135805c78de38eaf1bd5500b44625b36b7b653c0 | refs/heads/master | 2020-09-09T01:01:41.313964 | 2020-07-04T16:31:37 | 2020-07-04T16:31:37 | 221,296,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | #indexing....
my_string = "hello World"
print(my_string)
print(my_string[0]) #index zero
print(my_string[7]) #index seven
print(my_string[8]) #index eight
print(my_string[-1]) #last index
print(my_string[-2]) #second last index
#slicing....
my_string = "ABCDEFGHIJKL"
print(my_string)
print(my_string[2:]) #index two to last
print(my_string[:3]) #index zero to two
print(my_string[2:6]) #index 2 to 5
print(my_string[::2]) #one step jump
print(my_string[::-1]) #reverse
| [
"[email protected]"
] | |
d1194035877ccf46cd000542fa0cb83f128378d8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2847/60900/255175.py | 9f7f8927fa27a8621f5be9e8716e364de835126c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | n = input()
str1 = input()
nums = str1.split(" ")
str2 = input()
nums2 = str2.split(" ")
count = 0
for i in range(int(nums2[0]),int(nums2[1])):
count = count + int(nums[i-1])
print(count) | [
"[email protected]"
] | |
cc8f3b6012f30c1bdad4f411f454e6e816b04bde | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02549/s176160941.py | bdc2e8e51f883ca3eca69259dc2774ce9724f789 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | N, K = map(int, input().split())
L = [0] * K
R = [0] * K
for i in range(0, K):
L[i], R[i] = map(int, input().split())
moves = [0] * N
moves[0] = 1
rui_wa = [0] * N
rui_wa[0] = 1
for i in range(1, N):
for j in range(0, K):
l = max(i - L[j], 0)
r = max(i - R[j], 0)
if i - L[j] < 0:
continue
moves[i] += (rui_wa[l] - rui_wa[r - 1]) % 998244353
rui_wa[i] = (moves[i] + rui_wa[i - 1]) % 998244353
print(moves[N - 1] % 998244353)
| [
"[email protected]"
] | |
5de0b81f7eb9ffcb6f37c172ee267011003055f3 | 8a03b8459902d1bf0806f8d3387fb962bb57cf58 | /User_create/Negative_changepwd.py | b654fadc05e357cbb963c843646791c0392766c4 | [] | no_license | chetandg123/cQube | f95a0e86b1e98cb418de209ad26ae2ba463cfcbc | a862a1cdf46faaaff5cad49d78c4e5f0454a6407 | refs/heads/master | 2022-07-18T12:43:06.839896 | 2020-05-22T13:23:52 | 2020-05-22T13:23:52 | 258,089,042 | 0 | 0 | null | 2020-05-08T16:28:26 | 2020-04-23T03:55:52 | HTML | UTF-8 | Python | false | false | 1,828 | py | import time
import unittest
from selenium import webdriver
from Data.Paramters import Data
class Click_ChangePwd(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(Data.Path)
self.driver.maximize_window()
self.driver.implicitly_wait(10)
self.driver.get(Data.URL)
self.driver.find_element_by_xpath(Data.email).send_keys(Data.username)
self.driver.find_element_by_xpath(Data.pwd).send_keys(Data.password)
self.driver.find_element_by_xpath(Data.loginbtn).click()
time.sleep(5)
def test_set_negative_newpwd(self):
self.driver.find_element_by_xpath(Data.Dashboard).click()
time.sleep(3)
self.driver.find_element_by_xpath("/html/body/app-root/app-home/mat-sidenav-container/mat-sidenav/div/mat-nav-list/mat-list/mat-list-item/div/button/span/mat-icon").click()
time.sleep(3)
self.driver.find_element_by_xpath("/html/body/app-root/app-home/mat-sidenav-container/mat-sidenav/div/mat-nav-list/mat-list/div/a[2]/div/span").click()
pwd =self.driver.find_element_by_xpath("//h2").text
self.assertEqual(pwd,"Change Password","Change password is not found!..")
self.driver.find_element_by_xpath("//input[@name='newPasswd']").send_keys("tibil123")
time.sleep(2)
self.driver.find_element_by_xpath("//input[@name='cnfpass']").send_keys("tibil12")
time.sleep(2)
self.driver.find_element_by_xpath("//button[@type='submit']").click()
time.sleep(3)
errormsg = self.driver.find_element_by_xpath("//p").text
print(errormsg)
self.assertEqual(errormsg,"Password not matched" ,"Matching password!")
def tearDown(self):
time.sleep(5)
self.driver.close()
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
7229a9c285b03df22f176624c5e0f5b54b27a88d | a2fab78b021469748337bdbe46d60f4b2dccf6b9 | /day04/03.字符串的遍历.py | c5d627376bea9ed9bd537324019d43ced7a0f603 | [] | no_license | yywecanwin/PythonLearning | 06175886b42f6ec6be5ee8fa379365779e8e14e6 | f59d381692f22b3c7cf605aec88500f6c0267ffc | refs/heads/master | 2020-08-01T13:03:17.458829 | 2020-02-11T02:53:33 | 2020-02-11T02:53:33 | 211,006,180 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | # -*- coding: utf-8 -*-
# author:yaoyao time:2019/9/28
"""
字符串的遍历:
一个一个的得到里面的元素
"""
s = "hello python"
"""
# 遍历的方式1:
# 1.定义一个变量i 表示元素的索引,赋值为0,因为元素的索引是从0开始的
i = 0
# 2.while循环遍历字符串
while i <= len(s)-1:
#3.在循环中, 根据索引得到元素, 把元素打印出来
print(s[i])
# 4.在循环中,让i加1,是为了让索引加1,便于下次循环时得到下一个元素
i += 1
"""
"""
for 变量 in range()函数或者容器
"""
# 遍历方式2:for循环
for c in s:
print(c)
| [
"[email protected]"
] | |
e03b7ef67849e583abb795e43e173297706316ff | 798960eb97cd1d46a2837f81fb69d123c05f1164 | /symphony/cli/pyworkforce/graphql/input/check_list_category.py | 8ab69aef97666923166c55f03daa8d9166c133bc | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | kyaaqba/magma | 36d5fa00ce4f827e6ca5ebd82d97a3d36e5f5b5b | fdb7be22a2076f9a9b158c9670a9af6cad68b85f | refs/heads/master | 2023-01-27T12:04:52.393286 | 2020-08-20T20:23:50 | 2020-08-20T20:23:50 | 289,102,268 | 0 | 0 | NOASSERTION | 2020-08-20T20:18:42 | 2020-08-20T20:18:41 | null | UTF-8 | Python | false | false | 590 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from functools import partial
from gql.gql.datetime_utils import DATETIME_FIELD
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import DataClassJsonMixin
from ..input.check_list_item import CheckListItemInput
@dataclass
class CheckListCategoryInput(DataClassJsonMixin):
title: str
checkList: List[CheckListItemInput]
id: Optional[str] = None
description: Optional[str] = None
| [
"[email protected]"
] | |
9ac78261b3e0bfe904692b30ec71925efb1b2fd5 | e203ddace08580170e3b4de9c79588209e857c1c | /books.py | 23233198dc918f7183dbddd721d36fc2b0141ebf | [] | no_license | stradtkt/OOPTreehouse-Python | e17f3fd48840049b8b741aa0e30e54d1409804b2 | 84e0ef2142118bf44c416a3b1dde3519ff57fd15 | refs/heads/main | 2023-02-26T15:03:27.053205 | 2021-02-04T13:04:26 | 2021-02-04T13:04:26 | 334,620,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | class Book:
def __init__(self, title, author):
self.title = title
self.author = author
def __str__(self):
return '{}: {}'.format(self.title, self.author)
class Bookcase:
def __init__(self, books=None):
self.books = books
@classmethod
def create_bookcase(cls, book_list):
books = []
for title, author in book_list:
books.append(Book(title, author))
return cls(books) | [
"[email protected]"
] | |
24ee0c3b5ba31c62359bb82634292671f9df0b24 | e6b4b9dcca11d6a8abd110cd681b2712f9843030 | /src/env/dm_control/dm_control/composer/observation/observable/base_test.py | 0f519c2ba2e7da274db7fe54fd6ede820fd6dc34 | [
"MIT",
"Apache-2.0"
] | permissive | nicklashansen/svea-vit | a1b1d74fba88aaa94c876d354e7d6ed60cd3f064 | 33d3ea2682409ee82bf9c5129ceaf06ab01cd48e | refs/heads/main | 2023-07-21T18:35:08.439052 | 2023-07-11T20:09:50 | 2023-07-11T20:09:50 | 379,015,671 | 16 | 3 | MIT | 2023-07-11T20:09:52 | 2021-06-21T17:43:32 | Python | UTF-8 | Python | false | false | 5,914 | py | # Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for observable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Internal dependencies.
from absl.testing import absltest
from dm_control import mujoco
from dm_control.composer.observation import fake_physics
from dm_control.composer.observation.observable import base
import numpy as np
import six
_MJCF = """
<mujoco>
<worldbody>
<light pos="0 0 1"/>
<body name="body" pos="0 0 0">
<joint name="my_hinge" type="hinge" pos="-.1 -.2 -.3" axis="1 -1 0"/>
<geom name="my_box" type="box" size=".1 .2 .3" rgba="0 0 1 1"/>
<geom name="small_sphere" type="sphere" size=".12" pos=".1 .2 .3"/>
</body>
<camera name="world" mode="targetbody" target="body" pos="1 1 1" />
</worldbody>
</mujoco>
"""
class _FakeBaseObservable(base.Observable):
def _callable(self, physics):
pass
class ObservableTest(absltest.TestCase):
def testBaseProperties(self):
fake_observable = _FakeBaseObservable(update_interval=42,
buffer_size=5,
delay=10,
aggregator=None,
corruptor=None)
self.assertEqual(fake_observable.update_interval, 42)
self.assertEqual(fake_observable.buffer_size, 5)
self.assertEqual(fake_observable.delay, 10)
fake_observable.update_interval = 48
self.assertEqual(fake_observable.update_interval, 48)
fake_observable.buffer_size = 7
self.assertEqual(fake_observable.buffer_size, 7)
fake_observable.delay = 13
self.assertEqual(fake_observable.delay, 13)
enabled = not fake_observable.enabled
fake_observable.enabled = not fake_observable.enabled
self.assertEqual(fake_observable.enabled, enabled)
def testGeneric(self):
physics = fake_physics.FakePhysics()
repeated_observable = base.Generic(
fake_physics.FakePhysics.repeated, update_interval=42)
repeated_observation = repeated_observable.observation_callable(physics)()
self.assertEqual(repeated_observable.update_interval, 42)
np.testing.assert_array_equal(repeated_observation, [0, 0])
def testMujocoFeature(self):
physics = mujoco.Physics.from_xml_string(_MJCF)
hinge_observable = base.MujocoFeature(
kind='qpos', feature_name='my_hinge')
hinge_observation = hinge_observable.observation_callable(physics)()
np.testing.assert_array_equal(
hinge_observation, physics.named.data.qpos['my_hinge'])
box_observable = base.MujocoFeature(
kind='geom_xpos', feature_name='small_sphere', update_interval=5)
box_observation = box_observable.observation_callable(physics)()
self.assertEqual(box_observable.update_interval, 5)
np.testing.assert_array_equal(
box_observation, physics.named.data.geom_xpos['small_sphere'])
observable_from_callable = base.MujocoFeature(
kind='geom_xpos', feature_name=lambda: ['my_box', 'small_sphere'])
observation_from_callable = (
observable_from_callable.observation_callable(physics)())
np.testing.assert_array_equal(
observation_from_callable,
physics.named.data.geom_xpos[['my_box', 'small_sphere']])
def testMujocoCamera(self):
physics = mujoco.Physics.from_xml_string(_MJCF)
camera_observable = base.MujocoCamera(
camera_name='world', height=480, width=640, update_interval=7)
self.assertEqual(camera_observable.update_interval, 7)
camera_observation = camera_observable.observation_callable(physics)()
np.testing.assert_array_equal(
camera_observation, physics.render(480, 640, 'world'))
self.assertEqual(camera_observation.shape,
camera_observable.array_spec.shape)
self.assertEqual(camera_observation.dtype,
camera_observable.array_spec.dtype)
camera_observable.height = 300
camera_observable.width = 400
camera_observation = camera_observable.observation_callable(physics)()
self.assertEqual(camera_observable.height, 300)
self.assertEqual(camera_observable.width, 400)
np.testing.assert_array_equal(
camera_observation, physics.render(300, 400, 'world'))
self.assertEqual(camera_observation.shape,
camera_observable.array_spec.shape)
self.assertEqual(camera_observation.dtype,
camera_observable.array_spec.dtype)
def testCorruptor(self):
physics = fake_physics.FakePhysics()
def add_twelve(old_value, random_state):
del random_state # Unused.
return [x + 12 for x in old_value]
repeated_observable = base.Generic(
fake_physics.FakePhysics.repeated, corruptor=add_twelve)
corrupted = repeated_observable.observation_callable(
physics=physics, random_state=None)()
np.testing.assert_array_equal(corrupted, [12, 12])
def testInvalidAggregatorName(self):
name = 'invalid_name'
with six.assertRaisesRegex(self, KeyError, 'Unrecognized aggregator name'):
_ = _FakeBaseObservable(update_interval=3, buffer_size=2, delay=1,
aggregator=name, corruptor=None)
if __name__ == '__main__':
absltest.main()
| [
"[email protected]"
] | |
d2dc956bbc48eb170fbbda451cf3630d7b8168b1 | 5545d3c3e910ccb5b45b2277a71ad3c3ea3caedc | /jamenson/runtime/Attic/runtime.py | 85f8fe28ad0310322de14198533d79ebdb9fe6a4 | [
"Apache-2.0"
] | permissive | matthagy/Jamenson | 61de19c71da6e133bf7d8efbb933a1036cf1e6f5 | 18a0fdd60b3d56ed4a6d4e792132535324490634 | refs/heads/master | 2016-09-11T04:31:28.895242 | 2013-04-04T00:14:44 | 2013-04-04T00:14:44 | 1,781,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,969 | py |
'''objects used by runtime
'''
from itertools import count
import string
class symbol(object):
#instance cache for `is based comparisions and `id based hashing
_cache = {}
__slots__ = ['printForm']
@classmethod
def raw(cls, printForm):
self = object.__new__(cls)
self.printForm = printForm
return self
def __new__(cls, printForm):
try:
return cls._cache[printForm]
except KeyError:
self = cls._cache[printForm] = cls.raw(printForm)
return self
def __repr__(self):
return 'symbol(%s)' % (self.printForm)
def __str__(self):
return bprint(self)
def __reduce__(self):
if gensymbolp(self):
return (gensym, (self.printForm[2:],))
else:
return (symbol, (self.printForm,))
def reset_gensym_counter(start=0):
global gensym_counter
gensym_counter = iter(count(start)).next
reset_gensym_counter()
def gensym(base='gensym'):
return symbol.raw('#:%s%d' % (base,gensym_counter()))
def gensymbolp(op):
return op.printForm not in symbol._cache
class cons(object):
__slots__ = 'car cdr'.split()
def __init__(self, car, cdr):
self.car = car
self.cdr = cdr
def __iter__(self):
op = self
while op is not nil:
if not isinstance(op, cons):
raise TypeError("iterating over non-cons cdr")
yield op.car
op = op.cdr
def __nonzero__(self):
return self is not nil
def __repr__(self):
return str(self)
#if self is nil:
# return 'nil'
#return 'cons(%r, %r)' % (self.car, self.cdr)
def __str__(self):
return bprint(self)
def __reduce__(self):
if self is nil:
return (load_nil, ())
else:
return (cons, (self.car, self.cdr))
def __eq__(self, other):
if not isinstance(other, cons):
return NotImplemented
return self is other or (self.car == other.car and
self.cdr == other.cdr)
nil = cons(None, None)
nil.car = nil
nil.cdr = nil
def load_nil():
return nil
def clist(*seq):
head = acc = nil
for op in seq:
cell = cons(op, nil)
if acc is nil:
head = cell
else:
acc.cdr = cell
acc = cell
return head
def bprint(op):
acc = []
bprint_collect_parts(acc.append, set(), op)
return ''.join(acc)
noQuoteChars = set(string.ascii_letters +
string.digits +
string.punctuation + ' ') - set('"')
escapeChars = {
'\n': '\\n',
'\t': '\\t',
'"': '\\"'}
qsymbol = symbol('%quote')
def bprint_collect_parts(emit, memo, op):
if isinstance(op, symbol):
emit(op.printForm)
elif op is nil:
emit('nil')
elif isinstance(op, cons):
if op.car is qsymbol:
assert op.cdr.cdr is nil, 'bad quote %r' % (op.cdr,)
emit("'")
bprint_collect_parts(emit, memo, op.cdr.car)
return
if id(op) in memo:
emit('#<circular cons>')
return
memo.add(id(op))
emit('(')
first = True
while op is not nil:
if first:
first = False
else:
emit(' ')
bprint_collect_parts(emit, memo, op.car)
if isinstance(op.cdr, cons):
op = op.cdr
else:
emit(' . ')
bprint_collect_parts(emit, memo, op.cdr)
break
emit(')')
elif isinstance(op, (int,long,float)):
emit(str(op))
elif op is None or op is False or op is True:
emit(str(op).lower())
elif isinstance(op, str):
emit('"')
for c in op:
if c in noQuoteChars:
emit(c)
elif c in escapeChars:
emit(escapeChars[c])
else:
emit('\\x%x' % ord(c))
emit('"')
else:
emit('#<')
emit(repr(op))
emit('>')
class MacroFunction(object):
__slots__ = ['func', 'robust']
def __init__(self, func, robust=False):
self.func = func
self.robust = robust
def __call__(self, *args, **kwds):
raise RuntimeError("cannot directly call macro %s" % self.func.__name__)
def macroExpand(self, translator, *args, **kwds):
return self.func(translator, *args, **kwds)
def __getstate__(self):
return self.func, self.robust
def __setstate__(self, state):
self.func, self.robust = state
import types
class obj(object):
def __init__(self, **kwds):
vars(self).update(kwds)
def __repr__(self):
return '(%s %s)' % (self.__class__.__name__,
' '.join(":%s %r" % t
for t in vars(self).iteritems()))
| [
"[email protected]"
] | |
042e1d38d801465d0ca7ae7a6feda110a7e5825c | 5cea76d53779d466f19a5cf0b51e003586cc4a7b | /python开发技术详解/源文件/02/2.4/2.4.1/number_type.py | 12972ea330682a3ae610b87a14be45e5770f2447 | [] | no_license | evan886/python | 40152fdb4885876189580141abe27a983d04e04d | d33e996e93275f6b347ecc2d30f8efe05accd10c | refs/heads/master | 2021-06-28T12:35:10.793186 | 2021-05-26T14:33:40 | 2021-05-26T14:33:40 | 85,560,342 | 2 | 1 | null | 2017-10-11T05:31:06 | 2017-03-20T09:51:50 | JavaScript | GB18030 | Python | false | false | 326 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# 下面的两个i并不是同一个对象
i = 1
print id(i)
i = 2
print id(i)
# 整型
i = 1
print type(i)
# 长整型
l = 9999999990
print type(l)
# 浮点型
f = 1.2
print type(f)
# 布尔型
b = True
print type(b)
# 复数类型
c = 7 + 8j
print type(c)
| [
"[email protected]"
] | |
f04777412a8523157317d3eac4f93709fc5b3593 | 1da23d3bc4a7e21d81fe26c6b9f2b7f50711239b | /server/rating/calculation/online.py | 54cb691486cf77569c23edf725df62292f77533f | [
"MIT"
] | permissive | eIGato/mahjong-portal | 42dc62d3f98656ba15c02c3060f351f03ac3304a | 550a2a872c4287adab6ce30c3440dc2141430a20 | refs/heads/master | 2021-07-10T01:52:35.089662 | 2020-10-21T11:45:40 | 2020-10-21T11:45:40 | 212,129,601 | 0 | 0 | MIT | 2019-10-01T15:19:36 | 2019-10-01T15:19:36 | null | UTF-8 | Python | false | false | 573 | py | from player.models import Player
from rating.calculation.rr import RatingRRCalculation
from tournament.models import Tournament, TournamentResult
class RatingOnlineCalculation(RatingRRCalculation):
TOURNAMENT_TYPES = [Tournament.ONLINE]
SECOND_PART_MIN_TOURNAMENTS = 3
def get_players(self):
player_ids = TournamentResult.objects.filter(tournament__tournament_type=Tournament.ONLINE).values_list(
"player_id", flat=True
)
return Player.objects.filter(id__in=player_ids).exclude(is_replacement=True).exclude(is_hide=True)
| [
"[email protected]"
] | |
766acc5663cd498b1b0e9bc3c0a1d75f176b8b8b | 83003007b7bc12493e2bca2b5c78be5ea86df56c | /Day56-Day70/Day60/rabbit.py | df44054acbf7a81a072a6cb377f8dbb2ea4dd6e6 | [] | no_license | a6361117/code | fa7fe2f33c522ad38d92e6c429b50ef8a271bb1e | bd8bf877416acc5400dbda90212b7e83020ff643 | refs/heads/master | 2022-09-07T22:22:24.765271 | 2020-05-26T14:27:47 | 2020-05-26T14:27:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py |
#绘制兔
from turtle import *
speed(10)
#兔的面部
color('pink')
pensize(5)
circle(radius=100)#脸
#眼睛
pencolor('black')
#左眼
pu()
goto(-45,92)
pd()
begin_fill()
color((0,0,0),(0,0,0.1))
circle(radius=15)
#右眼
pu()
goto(45,92)
pd()
circle(radius=15)
end_fill()
#鼻子
pu()
goto(20,60)
color('pink')
pd()
begin_fill()
goto(-20,60)
goto(0,45)
goto(20,60)
end_fill()
#嘴
goto(0,45)
goto(0,40)
seth(-90)
circle(10,120)
pu()
goto(0,40)
seth(-90)
pd()
circle(-10,120)
#小兔的耳朵
#左耳
pu()
goto(-60,180)#
seth(200)
pd()
circle(radius=350,extent=90)
goto(-98,110)
#右耳
pu()
goto(60,180)#
seth(-20)
pd()
circle(radius=-350,extent=90)
goto(98,110)
#小兔的身体
pu()
goto(20,3)
seth(-25)
pd()
circle(radius=-250,extent=25)
circle(radius=-135,extent=260)
seth(50)
circle(radius=-250,extent=25)
##小兔的胳膊
#左臂
pu()
seth(180)
goto(-30,-3)
pd()
#小短胳膊
##circle(radius=270,extent=20)
##circle(radius=20,extent=190)
circle(radius=248,extent=30)
circle(radius=29,extent=185)
#右臂
pu()
seth(0)
goto(30,-3)
pd()
circle(radius=-248,extent=30)
circle(radius=-27,extent=184)
##小兔的脚
##左脚
pu()
goto(-162,-260)#
pd()
seth(0)
circle(radius=41)
#右脚
pu()
goto(164,-260)
pd()
circle(radius=41)
done()
| [
"[email protected]"
] | |
fc3617765023ab1000296d388685479f6ba1ca6f | 743d1918178e08d4557abed3a375c583130a0e06 | /src/CPSCAnalysis/getCPSCRelated.py | e63093d367e5958dd952311a6b852f55229f43a2 | [] | no_license | aquablue1/dns_probe | 2a027c04e0928ec818a82c5bf04f485a883cfcb3 | edd4dff9bea04092ac76c17c6e77fab63f9f188f | refs/heads/master | 2020-03-25T19:40:07.346354 | 2018-11-17T05:31:43 | 2018-11-17T05:31:43 | 144,094,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,508 | py | """
" Get the original CPSC related DNS traffic from original data files.
" Since CPSC DNS (ns1/2.cpsc.ucalgary.ca) mostly involved in the inbound traffic.
" Therefore only the inbound traffic is considered.
" By Zhengping on 2018-08-10
"""
from src.util.FolderReader import folderReader
from src.util.FileReader import fileReader
from src.util.FileWriter import batchFileWriter
from src.util.DNSFieldLocMap import FieldToLoc
import os
def doHourlyCPSCRelatedGen(inputFilename):
inputFile = fileReader(inputFilename)
checkedNames = ["ns1.cpsc.ucalgary.ca", "ns2.cpsc.ucalgary.ca", "mirror.cpsc.ucalgary.ca"]
ret_list = []
for line in inputFile:
queriedName = line.split("\t")[FieldToLoc["query"]]
if queriedName in checkedNames:
ret_list.append(line)
return ret_list
def doDailyCPSCRelatedGen(inputFolder, outputFolder):
filenames = folderReader(inputFolder, date)
outputHandler = batchFileWriter(outputFolder)
for filename in filenames:
outputFilename = "CPSCRow_%s" % filename.split("/")[-1]
hourlyRowData = doHourlyCPSCRelatedGen(filename)
for line in hourlyRowData:
outputHandler.writeString(outputFilename, line+"\n")
if __name__ == '__main__':
date = "2018-07-01"
inputFolder = "../../data/%s/inbound" % date
outputFolder = "../../result/CPSCRow/%s/" % date
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
doDailyCPSCRelatedGen(inputFolder, outputFolder)
| [
"[email protected]"
] | |
c15ff70830104dc267e24f059b88cd1002f1879d | ecae7275fd43ec93ca5771083e05ae864685faf9 | /DataScience/pandas/2column1.py | eb1bc2f91c3de96c00fb9272b9179e11d6d5d730 | [] | no_license | shamoldas/pythonBasic | 104ca8d50099c2f511802db1f161f6d050f879cc | 3a7252a15f6d829f55700ec2ff7f7d153f3ec663 | refs/heads/main | 2023-01-09T06:38:55.357476 | 2020-11-11T12:27:31 | 2020-11-11T12:27:31 | 311,960,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py |
# importing pandas
import pandas as pd
df = pd.DataFrame({'Last': ['Gaitonde', 'Singh', 'Mathur'],
'First': ['Ganesh', 'Sartaj', 'Anjali']})
print('Before Join')
print(df, '\n')
print('After join')
df['Name'] = df['First'].str.cat(df['Last'], sep =" ")
print(df)
| [
"[email protected]"
] | |
a0ba64b046817c1d4b87a37d70ac854c54c543fe | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_192/ch160_2020_06_19_20_23_54_764349.py | e8c31d19b6bb9456664ada3169ee602ac3e1ff52 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | import math
# Compare Bhaskara's sine approximation with math.sin over 0..89 degrees
# and report the largest absolute error seen.
# Fixes in this rewrite: the original had an unbalanced parenthesis
# (syntax error), called math.sin with degrees instead of radians, and
# applied max() to a single float inside the loop.
max_error = 0.0
for x in range(0, 90):
    exact = math.sin(math.radians(x))  # math.sin expects radians; x is in degrees
    bhaskara = (4 * x * (180 - x)) / (40500 - x * (180 - x))
    max_error = max(max_error, abs(bhaskara - exact))
print(max_error)
| [
"[email protected]"
] | |
ce01006fc28f38174aeae02dffe49f0214c5ae14 | 9554891e5e91fa9d7f75df0f28ae1d220c552478 | /tests/settings.py | 0bfc93f139030f93750f7d8315cca6601c124b85 | [
"MIT"
] | permissive | kmmbvnr/django-polymodels | 2e79cd72c68935a7e83953e0864ced1cb4a530c5 | 7a9b64b1851fea23a64d3d9421a69911e1669a49 | refs/heads/master | 2022-06-21T04:27:15.836175 | 2020-05-07T03:12:18 | 2020-05-07T10:36:06 | 261,932,926 | 1 | 0 | MIT | 2020-05-07T02:44:49 | 2020-05-07T02:44:48 | null | UTF-8 | Python | false | false | 245 | py | from __future__ import unicode_literals
# Minimal Django settings used only to run the test suite.
SECRET_KEY = 'not-anymore'  # required by Django; the value is irrelevant for tests
# SQLite keeps the test database self-contained (no external service needed).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
    },
}
# NOTE(review): contenttypes appears to be needed by polymodels' model
# resolution — confirm before removing.
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'polymodels',
    'tests',
]
| [
"[email protected]"
] | |
2958f0f909860b6534a0178f12383d7da22b1669 | 4bd4bacecee33cada173e427b5ecb1d758bafaad | /src/scalarizr/externals/chef/auth.py | ceb60cae41ffdfa7437210aa80b15e234cc31fef | [] | no_license | kenorb-contrib/scalarizr | 3f2492b20910c42f6ab38749545fdbb79969473f | 3cc8b64d5a1b39c4cf36f5057f1a6a84a9a74c83 | refs/heads/master | 2022-11-26T10:00:58.706301 | 2017-11-02T16:41:34 | 2017-11-02T16:41:34 | 108,550,233 | 0 | 2 | null | 2020-07-24T11:05:36 | 2017-10-27T13:33:46 | Python | UTF-8 | Python | false | false | 2,435 | py | from __future__ import with_statement
import base64
import datetime
import hashlib
import re
def _ruby_b64encode(value):
"""The Ruby function Base64.encode64 automatically breaks things up
into 60-character chunks.
"""
b64 = base64.b64encode(value)
for i in xrange(0, len(b64), 60):
yield b64[i:i+60]
def ruby_b64encode(value):
    """Base64-encode *value* with a newline every 60 characters, matching
    the output format of Ruby's Base64.encode64."""
    lines = list(_ruby_b64encode(value))
    return '\n'.join(lines)
def sha1_base64(value):
    """Python port of Mixlib::Authentication::Digester: the SHA-1 digest
    of *value*, Base64-encoded with Ruby-style 60-character line wraps."""
    digest = hashlib.sha1(value).digest()
    return ruby_b64encode(digest)
class UTC(datetime.tzinfo):
    """Minimal concrete tzinfo for UTC: fixed zero offset and no DST."""

    ZERO = datetime.timedelta(0)

    def tzname(self, dt):
        return 'UTC'

    def utcoffset(self, dt):
        return UTC.ZERO

    def dst(self, dt):
        return UTC.ZERO


# Module-level singleton used by canonical_time for timezone conversion.
utc = UTC()
def canonical_time(timestamp):
    """Format *timestamp* as the ISO-8601 UTC string the signing protocol
    expects: whole seconds with a trailing 'Z'. Aware datetimes are first
    converted to UTC and made naive."""
    if timestamp.tzinfo is not None:
        timestamp = timestamp.astimezone(utc).replace(tzinfo=None)
    truncated = timestamp.replace(microsecond=0)
    return '{0}Z'.format(truncated.isoformat())
# Pre-compiled pattern matching one or more consecutive slashes.
canonical_path_regex = re.compile(r'/+')

def canonical_path(path):
    """Collapse duplicate slashes and strip any trailing slash (except for
    the bare root path) to produce the canonical request path."""
    collapsed = canonical_path_regex.sub('/', path)
    if len(collapsed) <= 1:
        return collapsed
    return collapsed.rstrip('/')
def canonical_request(http_method, path, hashed_body, timestamp, user_id):
    """Build the canonical request string that gets RSA-signed for the
    Opscode authentication protocol."""
    # Canonicalize each component before assembly.
    method = http_method.upper()
    hashed_path = sha1_base64(canonical_path(path))
    if isinstance(timestamp, datetime.datetime):
        timestamp = canonical_time(timestamp)
    lines = [
        'Method:%s' % method,
        'Hashed Path:%s' % hashed_path,
        'X-Ops-Content-Hash:%s' % hashed_body,
        'X-Ops-Timestamp:%s' % timestamp,
        'X-Ops-UserId:%s' % user_id,
    ]
    return '\n'.join(lines)
def sign_request(key, http_method, path, body, host, timestamp, user_id):
    """Produce the X-Ops-* headers required by the Opscode authentication
    protocol, signing the canonical request with the client's RSA *key*.

    NOTE(review): *host* is accepted for interface compatibility but is not
    part of the signed payload here — confirm against the protocol spec.
    """
    canonical_ts = canonical_time(timestamp)
    body_hash = sha1_base64(body or '')

    # Fixed headers.
    headers = {
        'x-ops-sign': 'version=1.0',
        'x-ops-userid': user_id,
        'x-ops-timestamp': canonical_ts,
        'x-ops-content-hash': body_hash,
    }

    # RSA-sign the canonical request and spread the Base64 lines across
    # numbered x-ops-authorization-N headers.
    request = canonical_request(http_method, path, body_hash, canonical_ts, user_id)
    signature = key.private_encrypt(request)
    for index, line in enumerate(_ruby_b64encode(signature), start=1):
        headers['x-ops-authorization-%s' % index] = line
    return headers
| [
"[email protected]"
] | |
33a33cfd3f32dd9321b486aeb4d948593d5c76b2 | b15178f2ec828894c3b2d31b3ff6164be37ab875 | /setup.py | a511bad7d960a83c9af9d54df61c11eb837181ee | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/BIOMD0000000007 | 08e9de5d8d6745cde85d337c385e0f41f53906d3 | 1c03559e6e807621fa757386ea03dfae2c0ca312 | refs/heads/master | 2021-01-25T06:05:51.198922 | 2014-10-16T05:13:44 | 2014-10-16T05:13:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from setuptools import setup, find_packages
setup(name='BIOMD0000000007',
version=20140916,
description='BIOMD0000000007 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000007',
maintainer='Stanley Gu',
maintainer_url='[email protected]',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | [
"[email protected]"
] | |
d827e99e9bfe24739b29b9efd7b67641f05c3576 | ff3e0d75fda9a1a94fd8ba7618c0aab499b8393d | /musicians/migrations/0004_auto_20200813_0055.py | 255088a50889f0134f21340a8b9558fc20ab73a7 | [
"MIT"
] | permissive | victorsemenov1980/DjangoFullStack | bbe2897c20633b3eba8db807442eb0921668e6f1 | 655a3a9980057913c1aeeb1cd54683ccf12ad901 | refs/heads/master | 2023-04-05T23:34:13.836215 | 2021-04-22T18:08:51 | 2021-04-22T18:08:51 | 289,705,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | # Generated by Django 3.1 on 2020-08-13 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reshape the musicians models: drop the old pricing fields from
    Service, add profile fields to Main, and add a Service description."""

    dependencies = [
        ('musicians', '0003_info'),
    ]

    operations = [
        # Pricing is no longer stored per service; remove the old fields.
        migrations.RemoveField(
            model_name='service',
            name='Featured',
        ),
        migrations.RemoveField(
            model_name='service',
            name='Featured_Price',
        ),
        migrations.RemoveField(
            model_name='service',
            name='Price_hour',
        ),
        migrations.RemoveField(
            model_name='service',
            name='Price_service',
        ),
        # New required profile fields on Main. The 'none' default only
        # backfills existing rows (preserve_default=False).
        migrations.AddField(
            model_name='main',
            name='Bio',
            field=models.TextField(default='none'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='main',
            name='Instrument',
            field=models.CharField(default='none', max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='main',
            name='Organization',
            field=models.CharField(default='none', max_length=255),
            preserve_default=False,
        ),
        # Optional free-text description for each service.
        migrations.AddField(
            model_name='service',
            name='Description',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
| [
"[email protected]"
] | |
0ab52593e61a8c030d9e303a4c84011ce9f94f21 | 75e24fc71cf0833bb6040fa5037a0523c67d4581 | /nlplingo/active_learning/metrics.py | 5c880ba632dbf6cfbae101db65920c9732147a90 | [
"Apache-2.0"
] | permissive | BBN-E/nlplingo | 53d5ff2aa17d03a1c6db8afc8ed2b0cf683b1c55 | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | refs/heads/main | 2022-12-19T19:28:11.666850 | 2020-10-09T01:16:32 | 2020-10-09T01:16:32 | 302,090,268 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | import numpy as np
def best_vs_second_best(predictions):
    """Best-vs-second-best (margin) active-learning score per sample.

    For each row of class probabilities the score is
    1 - (p_best - p_second_best): close to 1 when the model is torn
    between its two top choices (informative sample), close to 0 when
    it is confident.

    :param predictions: 2-D array of shape (n_samples, n_classes)
    :type predictions: numpy.ndarray
    :return: 1-D array of shape (n_samples,) of margin scores
    :rtype: numpy.ndarray
    """
    # Sorting each row ascending puts the two largest probabilities in the
    # last two columns; their difference is non-negative, so the original
    # argsort + double fancy-indexing + abs() is unnecessary.
    ordered = np.sort(predictions, axis=1)
    return 1 - (ordered[:, -1] - ordered[:, -2])
| [
"[email protected]"
] | |
a2ae33df39f4c18bf1122e51783c1b3641f8a71b | 0a004fc3fe8e36fd7ce0ed2cc7e8140982315e03 | /unsupervised_learning/0x00-dimensionality_reduction/0-pca.py | 96f2f628a740e86a328e4e2a17f3fdae39d1650a | [] | no_license | pafuentess/holbertonschool-machine_learning | 266ed4f05e106e194cdafe39544e48904f6538f4 | 3bffd1391b3fc790f0137d0afbe90eb8e2f7d713 | refs/heads/master | 2023-03-26T15:12:14.721409 | 2021-03-20T20:28:15 | 2021-03-20T20:28:15 | 279,388,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!/usr/bin/env python3
""" doc """
import numpy as np
def pca(X, var=0.95):
""" doc """
U, sigma, V = np.linalg.svd(X)
a_sum = np.cumsum(sigma)
dim = [i for i in range(len(sigma)) if((a_sum[i]) / a_sum[-1]) >= var]
ndim = dim[0] + 1
return V.T[:, :ndim]
| [
"[email protected]"
] | |
6c13a2bb9c012badbf065b7117c98cf2344d8b14 | f7f834e68ce816011ae30be0883deef090fbeeed | /camp/Z_Template_2018/Day 5 - Space Invaders/space_invaders.py | be8cc7bd451a55706eed78c51f0099e5ac7b5db7 | [] | no_license | Rosebotics/PythonGameDesign2019 | 97b568cf999dea8642e254a22e528539946118e3 | 2f03476df940257adc2928f0c985c01daa5166f4 | refs/heads/master | 2020-06-04T04:42:35.656392 | 2019-06-22T16:21:57 | 2019-06-22T16:21:57 | 191,875,778 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,301 | py | import pygame, sys, random, time
from pygame.locals import *
class Missile:
    """Template class for a fighter missile; students fill in the TODOs.

    Intended behavior (per the TODOs): a missile starts just above the
    fighter and travels straight up until it leaves the screen or is
    marked exploded."""

    def __init__(self, screen, x):
        # TODO: Save the screen into a field
        # TODO: Save the x into a field
        # TODO: Set the y to 591 as a field (which is just above the fighter)
        # TODO: Set a field called exploded to False
        pass

    def move(self):
        # TODO: Move the missile up 5
        pass

    def draw(self):
        # TODO: Draw a red line from x, y that is 8 pixels in height
        pass
class Fighter:
    """The player's ship: a drawable sprite that fires missiles upward."""

    def __init__(self, screen, x, y):
        self.screen = screen
        self.image = pygame.image.load("fighter.png").convert()
        self.image.set_colorkey((255, 255, 255))  # white background -> transparent
        self.x = x
        self.y = y
        self.missiles = []

    def draw(self):
        """Blit the fighter sprite at its current position."""
        self.screen.blit(self.image, (self.x, self.y))

    def fire(self):
        """Launch a new missile from the fighter's nose (x + 50)."""
        missile = Missile(self.screen, self.x + 50)
        self.missiles.append(missile)

    def remove_exploded_missles(self):
        """Drop missiles that exploded or flew off the top of the screen."""
        # Slice-assign to keep the same list object, mirroring the
        # original in-place deletion.
        self.missiles[:] = [m for m in self.missiles
                            if not m.exploded and m.y >= 0]
class Badguy:
    """An enemy sprite that patrols horizontally around its spawn column."""

    def __init__(self, screen, x, y):
        self.dead = False
        self.screen = screen
        self.x = x
        self.y = y
        self.image = pygame.image.load("badguy.png").convert()
        self.image.set_colorkey((0, 0, 0))  # black background -> transparent
        self.original_x = x
        self.moving_right = True

    def move(self):
        """Slide 2 px per frame, reversing 100 px either side of spawn x."""
        step = 2 if self.moving_right else -2
        self.x = self.x + step
        if self.moving_right and self.x > self.original_x + 100:
            self.moving_right = False
        elif not self.moving_right and self.x < self.original_x - 100:
            self.moving_right = True

    def draw(self):
        """Blit the bad guy sprite at its current position."""
        self.screen.blit(self.image, (self.x, self.y))

    def hit_by(self, missile):
        """Return True when the missile's tip is inside this 70x45 hitbox."""
        hitbox = pygame.Rect(self.x, self.y, 70, 45)
        return hitbox.collidepoint(missile.x, missile.y)
class EnemyFleet:
    """The grid of bad guys: `enemy_rows` rows of 8, spaced 80x50 apart."""

    def __init__(self, screen, enemy_rows):
        self.badguys = []
        for row in range(enemy_rows):
            for col in range(8):
                self.badguys.append(Badguy(screen, 80 * col, 50 * row + 20))

    @property
    def is_defeated(self):
        """True once every bad guy has been shot down."""
        return not self.badguys

    def move(self):
        """Advance every bad guy one patrol step."""
        for guy in self.badguys:
            guy.move()

    def draw(self):
        """Render every bad guy."""
        for guy in self.badguys:
            guy.draw()

    def remove_dead_badguys(self):
        """Drop bad guys flagged dead, keeping the same list object."""
        self.badguys[:] = [guy for guy in self.badguys if not guy.dead]
def main():
    """Game-loop skeleton: students complete the TODOs to wire up input,
    firing, movement, drawing, collisions, and wave progression."""
    pygame.init()
    clock = pygame.time.Clock()
    pygame.display.set_caption("Space Invaders")
    screen = pygame.display.set_mode((640, 650))

    # TODO: Set enemy_rows to an initial value of 3.
    # TODO: Create an EnemyFleet object (called enemy) with the screen and enemy_rows
    # TODO: Create a Fighter (called fighter) at location 320, 590

    while True:
        clock.tick(60)  # cap the loop at 60 frames per second
        for event in pygame.event.get():
            # Key state snapshot for the SPACE-to-fire TODO below.
            pressed_keys = pygame.key.get_pressed()
            # TODO: If the event type is KEYDOWN and pressed_keys[K_SPACE} is True, then fire a missile
            if event.type == QUIT:
                sys.exit()
        screen.fill((0, 0, 0))
        pressed_keys = pygame.key.get_pressed()
        # TODO: If K_LEFT is pressed move the fighter left 3
        # TODO: If K_RIGHT is pressed move the fighter right 3
        # TODO: Draw the fighter
        # TODO: Move the enemy
        # TODO: Draw the enemy
        # TODO: For each missle in the fighter missiles
        # TODO: Move the missle
        # TODO: Draw the missle
        # TODO: For each badguy in the enemy badguys
        # TODO: For each missle in the fighter missiles
        # TODO: If the badguy is hit by the missle
        # TODO: Mark the badguy as dead = True
        # TODO: Mark the missile as exploded = True
        # TODO: Use the fighter to remove exploded missiles
        # TODO: Use the enemy to remove dead badguys
        # TODO: If the enemy id_defeated
        # TODO: Increment the enemy_rows
        # TODO: Create a new enemy with the screen and enemy_rows
        pygame.display.update()


main()
| [
"[email protected]"
] | |
7c7ec50d29b03c3642ab2ceba8b96c4be5487afb | 669e9241b02bdaa303fbc2fd4023b90d4d179a59 | /Basketball Scoreboard/challenge1.py | 72070c13f348ee839784ae72678555d7d2e7e973 | [] | no_license | benjaminpotter/HatchProjects | 0854cf46ae7c3781468116a5d63b703dd54ae68c | 7f6a948d3474c755d071751b725c059e6c7f3553 | refs/heads/master | 2022-01-28T16:58:03.449073 | 2019-08-16T13:47:30 | 2019-08-16T13:47:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | def setup():
size(400, 400)
threePoint = 0
fieldGoal = 0
freeThrow = 0
def drawScoreboard():
    # Repaints the whole scoreboard each frame: background, the three
    # button outlines, their labels, and the big total score.
    global threePoint, fieldGoal, freeThrow
    background(0, 0, 0)
    noFill()
    stroke(255, 0, 0)
    # Button outlines, left to right: 3-point, field goal, free throw.
    rect(30, 337, 110, 34)
    rect(155, 337, 110, 34)
    rect(278, 337, 116, 34)
    fill(255)
    textSize(22)
    text("3-Point", 50, 361)
    text("Field Goal", 160, 361)
    text("Free Throw", 279, 361)
    textSize(150)
    # Score colour shifts with the mix of shot types scored so far.
    fill(threePoint * 1.2 + 30, fieldGoal * 1.3 + 30, freeThrow * 1.8 + 30)
    # Total: 3 points per three-pointer, 2 per field goal, 1 per free throw.
    text(threePoint * 3 + fieldGoal * 2 + freeThrow, 116, 200)
def addPoints():
    """Increment whichever score button the current mouse position hits."""
    global threePoint, fieldGoal, freeThrow
    # All three buttons share the same vertical band; bail out early
    # when the click is outside it.
    if not (337 < mouseY < 371):
        return
    if 30 < mouseX < 140:
        threePoint += 1
    elif 155 < mouseX < 265:
        fieldGoal += 1
    elif 278 < mouseX < 388:
        freeThrow += 1
def draw():
    # Processing draw loop: repaint the scoreboard every frame.
    drawScoreboard()
def mousePressed():
addPoints() | [
"[email protected]"
] | |
08b01af01392cb5b5e0ab0605c707494fea4e10e | 05c9f1af21a698e09f7ec37a075624250e907262 | /samples/cloud_loadbalancers/session_persistence.py | 65361528513dff78dabf813b885ccaf5a90b79a5 | [
"Apache-2.0"
] | permissive | pycontribs/pyrax | 5f5a1d6816f5a831b1ae4b74ffaf438a1c0269a6 | 2397136b75e6fcc906ee406e9c1bc7aaef94387a | refs/heads/master | 2023-08-28T16:43:21.037208 | 2022-09-21T15:14:38 | 2022-09-21T15:14:38 | 5,975,139 | 10 | 27 | Apache-2.0 | 2021-07-12T21:23:11 | 2012-09-27T01:05:57 | Python | UTF-8 | Python | false | false | 1,492 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import pyrax
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
clb = pyrax.cloud_loadbalancers

# Demo: toggle session persistence on the account's first load balancer.
try:
    lb = clb.list()[0]
except IndexError:
    # No load balancers on the account; nothing to demonstrate.
    print("You do not have any load balancers yet.")
    print("Please create one and then re-run this script.")
    sys.exit()
print("Load Balancer:", lb)
orig = lb.session_persistence
print("Current setting of session persistence:", orig or '""')
print()
# Toggle: clear persistence if currently set, otherwise enable
# cookie-based persistence.
if orig:
    print("Clearing...")
    lb.session_persistence = ""
else:
    print("Setting persistence to HTTP_COOKIE...")
    lb.session_persistence = "HTTP_COOKIE"
print("New setting of session persistence:", lb.session_persistence or '""')
| [
"[email protected]"
] | |
570cc838272c8d6af88062cc6f7e249fd0b36979 | ea57ef44636ce151b3ef5322466cdfcb02482515 | /pendulum/constants.py | abc6ec06eacd7553dcf6ee58a8d094672a79966c | [
"MIT"
] | permissive | Sn3akyP3t3/pendulum | acb3dc5067576c4569a08b1d8a8ecfce918b4724 | 7ce170bdc64199d74e09e347402983f1bb015f63 | refs/heads/master | 2020-03-22T01:15:01.160870 | 2018-07-01T15:49:09 | 2018-07-01T15:49:09 | 139,292,657 | 0 | 0 | MIT | 2018-07-01T01:46:00 | 2018-07-01T01:46:00 | null | UTF-8 | Python | false | false | 2,836 | py | # The day constants
# Day-of-week constants (week indexed from Sunday = 0).
SUNDAY = 0
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6

# Number of X in Y.
YEARS_PER_CENTURY = 100
YEARS_PER_DECADE = 10
MONTHS_PER_YEAR = 12
WEEKS_PER_YEAR = 52
DAYS_PER_WEEK = 7
HOURS_PER_DAY = 24
MINUTES_PER_HOUR = 60
SECONDS_PER_MINUTE = 60
SECONDS_PER_HOUR = MINUTES_PER_HOUR * SECONDS_PER_MINUTE
SECONDS_PER_DAY = HOURS_PER_DAY * SECONDS_PER_HOUR
US_PER_SECOND = 1000000

# Formats: standard date/time formats expressed in the library's own
# formatting tokens (YYYY, MM, ... — not strftime directives).
ATOM = 'YYYY-MM-DDTHH:mm:ssZ'
COOKIE = 'dddd, DD-MMM-YYYY HH:mm:ss zz'
ISO8601 = 'YYYY-MM-DDTHH:mm:ssZ'
ISO8601_EXTENDED = 'YYYY-MM-DDTHH:mm:ss.SSSSSSZ'
RFC822 = 'ddd, DD MMM YY HH:mm:ss ZZ'
RFC850 = 'dddd, DD-MMM-YY HH:mm:ss zz'
RFC1036 = 'ddd, DD MMM YY HH:mm:ss ZZ'
RFC1123 = 'ddd, DD MMM YYYY HH:mm:ss ZZ'
RFC2822 = 'ddd, DD MMM YYYY HH:mm:ss ZZ'
RFC3339 = ISO8601
RFC3339_EXTENDED = ISO8601_EXTENDED
RSS = 'ddd, DD MMM YYYY HH:mm:ss ZZ'
W3C = ISO8601

EPOCH_YEAR = 1970

DAYS_PER_N_YEAR = 365
DAYS_PER_L_YEAR = 366

USECS_PER_SEC = 1000000

SECS_PER_MIN = 60
SECS_PER_HOUR = 60 * SECS_PER_MIN
SECS_PER_DAY = SECS_PER_HOUR * 24

# 400-year chunks always have 146097 days (20871 weeks).
SECS_PER_400_YEARS = 146097 * SECS_PER_DAY

# The number of seconds in an aligned 100-year chunk, for those that
# do not begin with a leap year and those that do respectively.
# (Index 0 = chunk does not start with a leap year, index 1 = it does.)
SECS_PER_100_YEARS = (
    (76 * DAYS_PER_N_YEAR + 24 * DAYS_PER_L_YEAR) * SECS_PER_DAY,
    (75 * DAYS_PER_N_YEAR + 25 * DAYS_PER_L_YEAR) * SECS_PER_DAY
)

# The number of seconds in an aligned 4-year chunk, for those that
# do not begin with a leap year and those that do respectively.
SECS_PER_4_YEARS = (
    (4 * DAYS_PER_N_YEAR + 0 * DAYS_PER_L_YEAR) * SECS_PER_DAY,
    (3 * DAYS_PER_N_YEAR + 1 * DAYS_PER_L_YEAR) * SECS_PER_DAY
)

# The number of seconds in non-leap and leap years respectively.
SECS_PER_YEAR = (
    DAYS_PER_N_YEAR * SECS_PER_DAY,
    DAYS_PER_L_YEAR * SECS_PER_DAY
)

DAYS_PER_YEAR = (
    DAYS_PER_N_YEAR,
    DAYS_PER_L_YEAR
)

# The month lengths in non-leap and leap years respectively.
# Index 0 is a -1 placeholder so months can be indexed 1..12 directly.
DAYS_PER_MONTHS = (
    (-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31),
    (-1, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
)

# The day offsets of the beginning of each (1-based) month in non-leap
# and leap years respectively.
# For example, in a leap year there are 335 days before December.
MONTHS_OFFSETS = (
    (-1, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365),
    (-1, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
)

# NOTE(review): these values match the per-month adjustment table of
# Sakamoto's day-of-week algorithm — confirm against the caller.
DAY_OF_WEEK_TABLE = (
    0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4
)

# struct tm style constants: weekdays from Sunday = 0, months 0-based.
TM_SUNDAY = 0
TM_MONDAY = 1
TM_TUESDAY = 2
TM_WEDNESDAY = 3
TM_THURSDAY = 4
TM_FRIDAY = 5
TM_SATURDAY = 6

TM_JANUARY = 0
TM_FEBRUARY = 1
TM_MARCH = 2
TM_APRIL = 3
TM_MAY = 4
TM_JUNE = 5
TM_JULY = 6
TM_AUGUST = 7
TM_SEPTEMBER = 8
TM_OCTOBER = 9
TM_NOVEMBER = 10
TM_DECEMBER = 11
| [
"[email protected]"
] | |
1e1d7ca3bfe15837aaed003514b62088a040f6d2 | 868ac4e558cf5fe945e8b557564f34f79b3ad01e | /purity_fb/purity_fb_1dot11/models/snmp_agent_response.py | 3eb0a329ee36e940b618e7040ff1ee601a4825ff | [
"Apache-2.0"
] | permissive | mabdelhafez/purity_fb_python_client | f4253ce8497fb3cff648e0a0cd1e567f48129fa7 | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | refs/heads/master | 2022-04-20T09:24:22.031408 | 2020-04-20T22:11:32 | 2020-04-20T22:15:44 | 257,372,596 | 0 | 0 | NOASSERTION | 2020-04-20T18:40:24 | 2020-04-20T18:40:23 | null | UTF-8 | Python | false | false | 4,171 | py | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.11 Python SDK
Pure Storage FlashBlade REST 1.11 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.11
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SnmpAgentResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # swagger_types maps attribute name -> attribute type;
    # attribute_map maps attribute name -> JSON key in the API definition.
    swagger_types = {
        'pagination_info': 'PaginationInfo',
        'items': 'list[SnmpAgent]'
    }

    attribute_map = {
        'pagination_info': 'pagination_info',
        'items': 'items'
    }

    def __init__(self, pagination_info=None, items=None):
        """SnmpAgentResponse - a model defined in Swagger"""
        self._pagination_info = None
        self._items = None

        # Only run the setters for values that were actually supplied.
        if pagination_info is not None:
            self.pagination_info = pagination_info
        if items is not None:
            self.items = items

    @property
    def pagination_info(self):
        """Pagination information, only available in GET requests.

        :rtype: PaginationInfo
        """
        return self._pagination_info

    @pagination_info.setter
    def pagination_info(self, pagination_info):
        """Set the pagination information of this SnmpAgentResponse."""
        self._pagination_info = pagination_info

    @property
    def items(self):
        """A list of SNMP agents.

        :rtype: list[SnmpAgent]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the list of SNMP agents of this SnmpAgentResponse."""
        self._items = items

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models that expose their own to_dict()."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when both objects are equal."""
        if not isinstance(other, SnmpAgentResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True when both objects are not equal."""
        return not self == other
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.