Dataset schema (each record row below lists these columns, in this order, separated by `|`):

| Column | Type | Lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string (nullable) | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
24cdcbecc7eafa54f83bb32c05eaadece9ae923c | 24caa6710105a060fab2e17147e6d56609939011 | /05-Importing_Data_in_Python_(Part_1)/03-Working_with_relational_databases_in_Python/09-Pandas_for_more_complex_querying.py | c6ed202627f94fe3a86b7922d627daf248673cce | [] | no_license | inverseundefined/DataCamp | 99607022ad3f899d7681ad1f70fcedab290e269a | 7226b6b6f41888c3610a884db9a226e013d37e56 | refs/heads/master | 2022-01-10T00:53:21.714908 | 2019-07-24T13:27:49 | 2019-07-24T13:27:49 | 198,280,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | '''
Pandas for more complex querying
Here, you'll become more familiar with the pandas function read_sql_query() by using it to execute a more complex query: a SELECT statement followed by both a WHERE clause AND an ORDER BY clause.
You'll build a DataFrame that contains the rows of the Employee table for which the EmployeeId is greater than or equal to 6 and you'll order these entries by BirthDate.
Instructions
Using the function create_engine(), create an engine for the SQLite database Chinook.sqlite and assign it to the variable engine.
Use the pandas function read_sql_query() to assign to the variable df the DataFrame of results from the following query: select all records from the Employee table where the EmployeeId is greater than or equal to 6 and ordered by BirthDate (make sure to use WHERE and ORDER BY in this precise order).
'''
# Import packages
from sqlalchemy import create_engine
import pandas as pd
# Create engine: engine
engine = create_engine('sqlite:///Chinook.sqlite')
# Execute query and store records in DataFrame: df
df = pd.read_sql_query('SELECT * FROM Employee WHERE EmployeeId >= 6 ORDER BY BirthDate', engine)
# Print head of DataFrame
print(df.head())
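# For comparison (editor's sketch, not part of the original exercise): the same
# result can be produced by filtering and sorting on the pandas side instead of
# in SQL.
df_all = pd.read_sql_query('SELECT * FROM Employee', engine)
df_alt = df_all[df_all['EmployeeId'] >= 6].sort_values('BirthDate')
print(df_alt.head())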
| [
"[email protected]"
] | |
0f9bf124f49507e8e88f9c99a67d39996068f0e1 | f090c3e0faa70cf0ef7c4be99cb894630bce2842 | /scripts_201410/simpleMeasurements/FFT/micromotioncompensate.py | 61e0fc2a67cd09a122b42c0821e42d4d1b12e7ff | [] | no_license | HaeffnerLab/resonator | 157d1dc455209da9b7de077157bda53b4883c8b7 | 7c2e377fdc45f6c1ad205f8bbc2e6607eb3fdc71 | refs/heads/master | 2021-01-09T20:48:03.587634 | 2016-09-22T18:40:17 | 2016-09-22T18:40:17 | 6,715,345 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from FFT import measureFFT
import numpy as np
import labrad
import time
cxn = labrad.connect()
dv = cxn.data_vault
recordTime = 0.5 #seconds
average = 4
freqSpan = 50.0 #Hz
freqOffset = -889 #Hz, the offset between the counter clock and the rf synthesizer clock
#setting up FFT
fft = measureFFT(cxn, recordTime, average, freqSpan, freqOffset, savePlot = False)
#saving
dv.cd(['','QuickMeasurements','FFT', 'Compensation'],True)
name = dv.new('FFT',[('number', 'n')], [('FFTPeak','Arb','Arb')] )
dv.add_parameter('plotLive',True)
print('Saving {}'.format(name))
for j in range(100):
micromotion = fft.getPeakArea(ptsAround = 3)
dv.add(j, micromotion)
    print(micromotion)
| [
"[email protected]"
] | |
df7b27de7032e41159d2757d07e22dd5bf52718c | cad91ae76d2746a6c28ddda0f33a58f9d461378f | /TensorFlow2/LanguageModeling/BERT/dllogger_class.py | be211785d770825978dc9b4cb32631e11f2435bc | [] | no_license | NVIDIA/DeepLearningExamples | fe677521e7e2a16e3cb0b77e358f9aab72f8c11a | a5388a45f71a949639b35cc5b990bd130d2d8164 | refs/heads/master | 2023-08-31T20:57:08.798455 | 2023-08-23T10:09:12 | 2023-08-23T10:09:12 | 131,881,622 | 11,838 | 3,124 | null | 2023-08-28T16:57:33 | 2018-05-02T17:04:05 | Jupyter Notebook | UTF-8 | Python | false | false | 2,852 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from dllogger import Logger, StdOutBackend, JSONStreamBackend, Verbosity
import numpy
class dllogger_class():
def format_step(self, step):
if isinstance(step, str):
return step
elif isinstance(step, int):
return "Iteration: {} ".format(step)
elif len(step) > 0:
return "Iteration: {} ".format(step[0])
else:
return ""
def __init__(self, log_path="bert_dllog.json"):
self.logger = Logger([
StdOutBackend(Verbosity.DEFAULT, step_format=self.format_step),
JSONStreamBackend(Verbosity.VERBOSE, log_path),
])
self.logger.metadata("mlm_loss", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"})
self.logger.metadata("nsp_loss", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"})
self.logger.metadata("avg_loss_step", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"})
self.logger.metadata("total_loss", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"})
self.logger.metadata("loss", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"})
self.logger.metadata("f1", {"unit": None, "format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"})
self.logger.metadata("precision", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"})
self.logger.metadata("recall", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"})
self.logger.metadata("mcc", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"})
self.logger.metadata("exact_match", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"})
self.logger.metadata(
"throughput_train",
{"unit": "sequences/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": "TRAIN"},
)
self.logger.metadata(
"throughput_inf",
{"unit": "sequences/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": "VAL"},
)
self.logger.metadata(
"throughput_val",
{"unit": "sequences/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": "VAL"},
)
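# Example usage (editor's sketch, not part of the original file): the wrapper
# exposes a configured dllogger Logger, so metrics are reported via Logger.log().
if __name__ == "__main__":
    dllogging = dllogger_class(log_path="example_dllog.json")
    dllogging.logger.log(step=(0,), data={"total_loss": 1.234, "throughput_train": 512.0})
    dllogging.logger.log(step="PARAMETER", data={"batch_size": 64})
    dllogging.logger.flush()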
| [
"[email protected]"
] | |
6bec030a51b5bb4b0f123d9777dc394b085cf5e0 | 9eaa2c64a777bd24a3cccd0230da5f81231ef612 | /study/1905/month01/code/Stage5/day16/demo06_canny.py | 8cecd5c5324a39778bbcead274373be63fe735f3 | [
"MIT"
] | permissive | Dython-sky/AID1908 | 4528932f2ca66b844d8a3fcab5ed8bf84d20eb0c | 46cd54a7b36b5f009974f2bbb7005a4ad440ca1a | refs/heads/master | 2022-04-14T12:23:30.426270 | 2020-04-01T18:05:19 | 2020-04-01T18:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | """
demo06_canny.py: edge detection with Sobel, Laplacian and Canny operators
"""
import cv2 as cv
original = cv.imread('../ml_data/chair.jpg',cv.IMREAD_GRAYSCALE)
print(original)
cv.imshow('original',original)
# Sobel edge detection (first-order derivative, dx=0, dy=1)
sobel = cv.Sobel(original,cv.CV_64F,0,1,ksize=5)
cv.imshow('sobel',sobel)
# Laplacian edge detection (second-order derivative)
laplacian = cv.Laplacian(original,cv.CV_64F)
cv.imshow('laplacian',laplacian)
# Canny edge detection (hysteresis thresholds 50 and 200)
canny = cv.Canny(original,50,200)
cv.imshow('canny',canny)
cv.waitKey() | [
"[email protected]"
] | |
81eb6216326223d83778b2d3bd64fbec29228251 | 73758dde83d1a1823c103e1a4ba71e7c95168f71 | /nsd2002/py02/day03/game_role.py | 65eea729683ff4a6c379867472ab679b07dec8fa | [] | no_license | tonggh220/md_5_nsd_notes | 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | a58a021ad4c7fbdf7df327424dc518f4044c5116 | refs/heads/master | 2023-07-02T01:34:38.798929 | 2021-05-12T08:48:40 | 2021-05-12T08:48:40 | 393,885,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | class Role:
def __init__(self, name, weapon):
        # Constructor method, called automatically on instantiation. Note that
        # "self" is not a keyword; any name would work, "self" is just the convention.
self.name = name
self.weapon = weapon
def show_me(self):
# 绑定在实例身上的属性,在类中任意位置可见可用
print('我是%s,我擅用%s' % (self.name, self.weapon))
def speak(self, words):
        # A variable not bound to the instance is just a local variable, usable only inside this method
hh = 'Hahaha'
print(hh)
print(words)
if __name__ == '__main__':
    # The instance itself is passed automatically as the first argument (lb in this example)
    lb = Role('吕布', '方天画戟')  # instantiate: create a concrete object
print(lb.name, lb.weapon)
lb.show_me()
lb.speak('马中赤兔,人中吕布')
| [
"[email protected]"
] | |
3d87924ec7d7fd9fcc0bcf9142588b70d3044ea6 | 04e2a63c2a393ec3782a482b1734b6462c885d5d | /univelcity/open_file.py | a5d41c60faaaf3883d1b9e76f60d5a9ad4ae687c | [] | no_license | AzeezBello/_python | c1d671efbca2ed2ca7d65513efd2c55b496ddad7 | 266bc5aed9bfb93ea93b07712b48406331a9a327 | refs/heads/master | 2020-05-17T18:09:49.133120 | 2019-05-16T07:08:50 | 2019-05-16T07:08:50 | 183,876,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | # file = open("death_causes.csv", "r")
# index = 0
# for line in file:
# index += 1
# print(line.split(","))
# if index == 3:
# break
# # Year,Cause Name,Cause Name,State,Deaths,Age-adjusted Death Rate
# file = open("death_causes.csv", "r")
# deaths = 0
# count = 0
# for line in file:
# if count == 0:
# pass
# else:
# raw = line.split(",")
# print(raw)
# if raw[0] == "2014":
# deaths += int(raw[4])
# count += 1
# print(deaths/365)
# Year,Cause Name,Cause Name,State,Deaths,Age-adjusted Death Rate
# with open("twist.txt", "r") as file:
# for line in file:
# print(line)
# file.close()
import pymysql.cursors
class Mortality:
def __init__(self, year, cause_name_full, cause_name, state, deaths, age_adjusted_death_rate):
self.year = (year)
self.cause_name_full = cause_name_full
self.cause_name = cause_name
self.state = state
self.deaths = (deaths)
self.age_adjusted_death_rate = age_adjusted_death_rate[:-1]
# Connect to the database
connection = pymysql.connect(host='localhost',
user='root',
password='',
db='db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
def create_table(name):
with connection.cursor() as cursor:
# Create a new record
try:
sql = f"""CREATE TABLE {name}
(id int NOT NULL PRIMARY KEY AUTO_INCREMENT,
year INT(4),
cause_name_full TEXT,
cause_name TEXT,
state VARCHAR(50),
deaths VARCHAR(50),
age_adjusted_death_rate VARCHAR(50))"""
            cursor.execute(sql)
            # connection is not autocommit by default, so you must commit to
            # save your changes.
            connection.commit()
        except Exception:
            # CREATE TABLE raises if the table already exists.
            print('Table Exists')
def open_file():
file = open("death_causes.csv", "r")
count = 0
for line in file:
if count == 0:
pass
else:
raw = line.split(",")
# print(raw)
new_mortality_object = Mortality( year = raw[0], cause_name_full = raw[1], cause_name= raw[2], state = raw[3], deaths = raw[4], age_adjusted_death_rate = raw[5])
post_to_db(new_mortality_object)
count += 1
def post_to_db(mortality_object):
with connection.cursor() as cursor:
# Create a new record
sql = f"""insert into mortality_rate (year, cause_name_full, cause_name, state, deaths, age_adjusted_death_rate)
values ("{mortality_object.year}", "{mortality_object.cause_name_full}", "{mortality_object.cause_name}", "{mortality_object.state}", "{mortality_object.deaths}", "{mortality_object.age_adjusted_death_rate}")"""
# print(sql)
cursor.execute(sql)
connection.commit()
#CREATE TABLE IN DATABASE
create_table("mortality_rate")
#THEN PUSH FILES INTO TABLE
open_file() | [
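# Editor's note (illustrative sketch, not in the original): interpolating values
# with f-strings breaks on quotes in the data and is SQL-injection-prone. A
# parameterized version of the insert with pymysql placeholders would look like:
def post_to_db_parameterized(mortality_object):
    with connection.cursor() as cursor:
        sql = ("insert into mortality_rate "
               "(year, cause_name_full, cause_name, state, deaths, age_adjusted_death_rate) "
               "values (%s, %s, %s, %s, %s, %s)")
        cursor.execute(sql, (mortality_object.year, mortality_object.cause_name_full,
                             mortality_object.cause_name, mortality_object.state,
                             mortality_object.deaths, mortality_object.age_adjusted_death_rate))
    connection.commit()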
"[email protected]"
] | |
0332c4d5e620cd87f9b70d77e4f57a67c07e72a3 | 3b89c0a97ac6b58b6923a213bc8471e11ad4fe69 | /python/CodingExercises/MoveSpacesFrontString.py | af9641cf57932b4daa0e84d62d196bc3aa65de22 | [] | no_license | ksayee/programming_assignments | b187adca502ecf7ff7b51dc849d5d79ceb90d4a6 | 13bc1c44e1eef17fc36724f20b060c3339c280ea | refs/heads/master | 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | '''
Move spaces to front of string in single traversal
Given a string that has set of words and spaces, write a program to move all spaces to front of string, by traversing the string only once.
Examples:
Input : str = "geeks for geeks"
Output : str = "  geeksforgeeks"
Input : str = "move these spaces to beginning"
Output : str = "    movethesespacestobeginning"
There were four space characters in input,
all of them should be shifted in front.
'''
def MoveSpacesFrontString(str1):
    # Note: this splits on spaces rather than literally scanning the string once;
    # consecutive spaces appear as empty strings in the split result.
    output_list = []
    lst = str1.split(' ')
    prev_word = ''
    for word in lst:
        if len(word) == 0:
            # An empty item marks one space character in the input.
            output_list.append(' ')
        else:
            if len(prev_word) > 0:
                # Every word after the first also accounts for one separating space.
                output_list.append(' ')
            prev_word = word
    # All words, concatenated without separators, follow the collected spaces.
    output_list.append(''.join(lst))
    return ''.join(output_list)
def main():
str1="geeks for geeks"
print(MoveSpacesFrontString(str1))
str1 = "move these spaces to beginning"
print(MoveSpacesFrontString(str1))
str1 = "move these spaces to beginning"
print(MoveSpacesFrontString(str1))
if __name__=='__main__':
main() | [
"[email protected]"
] | |
b7aade2484b165d22de966e987fd39bcf4cf37f0 | 286df6528096b6393b61d3ecb3b7002cb9a7b983 | /python/ql/test/library-tests/frameworks/aiohttp/response_test.py | 1988f4435604cade3227c27d40ba902f6661df59 | [
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"MIT"
] | permissive | Inncee81/codeql | ed620df0ae7b706943eccd92af37e037f540f6a4 | 38a38fd2c145628472d14c9e9d6ca812fd525793 | refs/heads/main | 2023-06-13T01:23:30.086459 | 2021-06-22T10:59:44 | 2021-06-22T10:59:44 | 379,254,229 | 1 | 0 | MIT | 2021-06-22T12:02:02 | 2021-06-22T12:02:01 | null | UTF-8 | Python | false | false | 3,173 | py | from aiohttp import web
routes = web.RouteTableDef()
@routes.get("/raw_text") # $ routeSetup="/raw_text"
async def raw_text(request): # $ requestHandler
return web.Response(text="foo") # $ HttpResponse mimetype=text/plain responseBody="foo"
@routes.get("/raw_body") # $ routeSetup="/raw_body"
async def raw_body(request): # $ requestHandler
return web.Response(body=b"foo") # $ HttpResponse mimetype=application/octet-stream responseBody=b"foo"
@routes.get("/html_text") # $ routeSetup="/html_text"
async def html_text(request): # $ requestHandler
return web.Response(text="foo", content_type="text/html") # $ HttpResponse mimetype=text/html responseBody="foo"
@routes.get("/html_body") # $ routeSetup="/html_body"
async def html_body(request): # $ requestHandler
return web.Response(body=b"foo", content_type="text/html") # $ HttpResponse mimetype=text/html responseBody=b"foo"
@routes.get("/html_body_set_later") # $ routeSetup="/html_body_set_later"
async def html_body_set_later(request): # $ requestHandler
resp = web.Response(body=b"foo") # $ HttpResponse mimetype=application/octet-stream responseBody=b"foo"
resp.content_type = "text/html" # $ MISSING: mimetype=text/html
return resp
# Each HTTP status code has an exception
# see https://docs.aiohttp.org/en/stable/web_quickstart.html#exceptions
@routes.get("/through_200_exception") # $ routeSetup="/through_200_exception"
async def through_200_exception(request): # $ requestHandler
raise web.HTTPOk(text="foo") # $ HttpResponse mimetype=text/plain responseBody="foo"
@routes.get("/through_200_exception_html") # $ routeSetup="/through_200_exception_html"
async def through_200_exception(request): # $ requestHandler
exception = web.HTTPOk(text="foo") # $ HttpResponse mimetype=text/plain responseBody="foo"
exception.content_type = "text/html" # $ MISSING: mimetype=text/html
raise exception
@routes.get("/through_404_exception") # $ routeSetup="/through_404_exception"
async def through_404_exception(request): # $ requestHandler
raise web.HTTPNotFound(text="foo") # $ HttpResponse mimetype=text/plain responseBody="foo"
@routes.get("/redirect_301") # $ routeSetup="/redirect_301"
async def redirect_301(request): # $ requestHandler
if not "kwarg" in request.url.query:
raise web.HTTPMovedPermanently("/login") # $ HttpResponse HttpRedirectResponse mimetype=application/octet-stream redirectLocation="/login"
else:
raise web.HTTPMovedPermanently(location="/logout") # $ HttpResponse HttpRedirectResponse mimetype=application/octet-stream redirectLocation="/logout"
@routes.get("/redirect_302") # $ routeSetup="/redirect_302"
async def redirect_302(request): # $ requestHandler
if not "kwarg" in request.url.query:
raise web.HTTPFound("/login") # $ HttpResponse HttpRedirectResponse mimetype=application/octet-stream redirectLocation="/login"
else:
raise web.HTTPFound(location="/logout") # $ HttpResponse HttpRedirectResponse mimetype=application/octet-stream redirectLocation="/logout"
if __name__ == "__main__":
app = web.Application()
app.add_routes(routes)
web.run_app(app)
| [
"[email protected]"
] | |
8400f0f8f16237cd362e0cc37f3436e13b3d755f | 82f6a6c50a1fef2d7522a43cc4f60e5ff80b37a8 | /solutions/Longest Word in Dictionary through Deleting/solution.py | 267c70a98bb61b70fe13d5f17a5e27cb662c0fae | [
"MIT"
] | permissive | nilax97/leetcode-solutions | ca0f9545ce70975617738f053e0935fac00b04d4 | d3c12f2b289662d199510e0431e177bbf3cda121 | refs/heads/master | 2023-05-14T02:21:48.893716 | 2021-06-08T13:16:53 | 2021-06-08T13:16:53 | 374,466,870 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | class Solution:
    from typing import List  # editor's addition so the List[str] annotation resolves standalone

    def findLongestWord(self, s: str, d: List[str]) -> str:
        # Prepend a sentinel so index 0 sits "before" the first real character.
        s = '_' + s
        # nxt[j][c] = index of the next occurrence of character c strictly after j.
        n, nxt = len(s), [{} for _ in s]
        for i, c in enumerate(s):
            for j in range(i - 1, -1, -1):
                nxt[j][c] = i
                # Stop at the previous occurrence of c; earlier positions already
                # point to that closer occurrence.
                if s[j] == c: break
        def find(word):
            # Subsequence test: jump through the next-occurrence table.
            i = 0
            for c in word:
                i = nxt[i].get(c)
                if i is None: return False
            return True
        res = ""
        for word in d:
            # Keep the longest match, breaking ties by lexicographic order.
            if find(word) and (not res or (-len(word), word) < (-len(res), res)):
                res = word
        return res
| [
"[email protected]"
] | |
e9a1fed6a23067a05df9d37a4204e81098c48194 | b9bf3b34b59ec8e566b7ad6e58b7d0429370d6bd | /gunicorn_conf.py | 3b6bec2f43185136d7017ecf5ea3fe59f9f34931 | [] | no_license | dutradda/chunli | 7eea614b6c6c3c0738bec2f15d8224430e450a82 | 54e4385a34f805a2c13acdf85aec98d63c4eaff7 | refs/heads/master | 2021-08-16T09:22:45.388575 | 2020-09-03T12:55:33 | 2020-09-03T12:55:33 | 217,397,141 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import os
import redis
def worker_exit(server, worker):
r = redis.Redis.from_url(os.environ.get('REDIS_TARGET', 'redis://'))
r.publish('chunli:distributed', 'stop')
def child_exit(server, worker):
r = redis.Redis.from_url(os.environ.get('REDIS_TARGET', 'redis://'))
r.publish('chunli:distributed', 'stop')
| [
"[email protected]"
] | |
8fe248d9822eea62924d8b53b9b960bb32bfe359 | 6541487fb7df24610e5c61aa30d4a39b9117b427 | /tests/test_math_helpers.py | 6cf87e9dc244968d69684b98f2d4a3ab0f4b7c6f | [
"MIT"
] | permissive | theY4Kman/birdfeeder | 0e1f90a96b1607c0675ea3ab70a00fc99b97e7ac | 25503a138fe01589fb28317ae0f3e281d6ce1961 | refs/heads/master | 2023-04-21T11:23:07.699322 | 2021-03-24T08:36:13 | 2021-03-24T08:37:40 | 368,974,412 | 0 | 0 | MIT | 2021-05-19T19:03:43 | 2021-05-19T19:03:43 | null | UTF-8 | Python | false | false | 510 | py | from decimal import Decimal
from birdfeeder.math_helpers import safe_div, safe_mean
def test_safe_div_basic():
assert safe_div(10, 2) == 5.0
def test_safe_div_basic_decimal():
assert safe_div(Decimal(10), Decimal(2)) == Decimal(5)
def test_safe_div_zero_div():
assert safe_div(10, 0) == 0.0
def test_safe_mean_basic():
assert safe_mean([2, 4]) == 3.0
def test_safe_mean_empty():
assert safe_mean([]) == 0.0
def test_safe_mean_zero_values():
assert safe_mean([0, 0]) == 0.0
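# For reference (editor's sketch inferred from the tests above; the real
# implementations live in birdfeeder.math_helpers): both helpers return 0
# instead of raising on zero/empty input, roughly:
#
#     def safe_div(num, denom):
#         return num / denom if denom else 0.0
#
#     def safe_mean(values):
#         return safe_div(sum(values), len(values))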
| [
"[email protected]"
] | |
b9b8b6190fea295a20706bf72e02f8bd6b16d816 | 0a15660807aee7d2fccbef1a3e633cabd1deb972 | /subway/models.py | 6dd5ae55f71cff97c7052df438f87e6a8c662e4e | [] | no_license | chirs/hs | 7860e77230cd2577cac79539039f0e2a7590ef35 | f1985e11a73b29fa8bf4fd1725c529ec8e61cb5b | refs/heads/master | 2021-01-21T10:42:15.789926 | 2017-02-28T20:12:31 | 2017-02-28T20:12:31 | 83,474,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, MetaData, ForeignKey, Text, Float
from sqlalchemy.orm import mapper
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///:memory:', echo=True)
Base = declarative_base()
class Station(Base):
"""
A subway station, like "Atlantic - Pacific"
"""
__tablename__ = 'stations'
id = Column(Integer, primary_key=True)
sid = Column(String)
name = Column(String)
lat = Column(Float)
lng = Column(Float)
def __init__(self, sid, name, lat, lng):
self.sid = sid
self.name = name
self.lat = lat
self.lng = lng
class SubStation(Base):
"""
A subway substation, like 116N [116th Street North]
"""
__tablename__ = 'substations'
id = Column(Integer, primary_key=True)
pid = Column(Integer, ForeignKey('stations.id'))
name = Column(String)
class Route(Base):
"""
A subway route like 1 or D.
"""
__tablename__ = 'routes'
id = Column(Integer, primary_key=True)
rid = Column(String)
name = Column(String)
description = Column(String)
color = Column(String)
def __init__(self, rid, name, description, color):
self.rid = rid
self.name = name
self.description = description
self.color = color
Base.metadata.create_all(engine)
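# Example usage (editor's sketch, not part of the original module; the station
# values are illustrative): persist and read back a Station through a session.
from sqlalchemy.orm import sessionmaker

Session = sessionmaker(bind=engine)
session = Session()
session.add(Station('635', 'Atlantic - Pacific', 40.6838, -73.9785))
session.commit()
print(session.query(Station).filter_by(sid='635').first().name)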
| [
"[email protected]"
] | |
dc105c937af95e74bf4880b57361a7470c141909 | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/venv/lib/python3.6/site-packages/jedi/evaluate/dynamic.py | fe9d28e5d70906257d64b55fcc219bbc2f5d3c6a | [] | no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,165 | py | """
One of the really important features of |jedi| is to have an option to
understand code like this::
def foo(bar):
bar. # completion here
foo(1)
There's no doubt whether bar is an ``int`` or not, but if there's also a call
like ``foo('str')``, what would happen? Well, we'll just show both. Because
that's what a human would expect.
It works as follows:
- |Jedi| sees a param
- search for function calls named ``foo``
- execute these calls and check the input.
"""
from jedi import debug, settings
from jedi.evaluate import imports
from jedi.evaluate.arguments import TreeArguments
from jedi.evaluate.base_context import ContextSet
from jedi.evaluate.cache import evaluator_function_cache
from jedi.evaluate.context import ModuleContext, instance
from jedi.evaluate.helpers import is_stdlib_path
from jedi.evaluate.param import create_default_params
from jedi.evaluate.utils import to_list
from jedi.parser_utils import get_parent_scope
from parso.python import tree
MAX_PARAM_SEARCHES = 20
class MergedExecutedParams(object):
"""
Simulates being a parameter while actually just being multiple params.
"""
def __init__(self, executed_params):
self._executed_params = executed_params
def infer(self):
return ContextSet.from_sets(p.infer() for p in self._executed_params)
@debug.increase_indent
def search_params(evaluator, execution_context, funcdef):
"""
A dynamic search for param values. If you try to complete a type:
>>> def func(foo):
... foo
>>> func(1)
>>> func("")
It is not known what the type ``foo`` without analysing the whole code. You
have to look for all calls to ``func`` to find out what ``foo`` possibly
is.
"""
if not settings.dynamic_params:
return create_default_params(execution_context, funcdef)
evaluator.dynamic_params_depth += 1
try:
path = execution_context.get_root_context().py__file__()
if path is not None and is_stdlib_path(path):
# We don't want to search for usages in the stdlib. Usually people
# don't work with it (except if you are a core maintainer, sorry).
# This makes everything slower. Just disable it and run the tests,
# you will see the slowdown, especially in 3.6.
return create_default_params(execution_context, funcdef)
if funcdef.type == 'lambdef':
string_name = _get_lambda_name(funcdef)
if string_name is None:
return create_default_params(execution_context, funcdef)
else:
string_name = funcdef.name.value
debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA')
try:
module_context = execution_context.get_root_context()
function_executions = _search_function_executions(
evaluator,
module_context,
funcdef,
string_name=string_name,
)
if function_executions:
zipped_params = zip(*list(
function_execution.get_params()
for function_execution in function_executions
))
params = [MergedExecutedParams(executed_params) for executed_params in zipped_params]
# Evaluate the ExecutedParams to types.
else:
return create_default_params(execution_context, funcdef)
finally:
debug.dbg('Dynamic param result finished', color='MAGENTA')
return params
finally:
evaluator.dynamic_params_depth -= 1
@evaluator_function_cache(default=None)
@to_list
def _search_function_executions(evaluator, module_context, funcdef, string_name):
"""
Returns a list of param names.
"""
compare_node = funcdef
if string_name == '__init__':
cls = get_parent_scope(funcdef)
if isinstance(cls, tree.Class):
string_name = cls.name.value
compare_node = cls
found_executions = False
i = 0
for for_mod_context in imports.get_modules_containing_name(
evaluator, [module_context], string_name):
if not isinstance(module_context, ModuleContext):
return
for name, trailer in _get_possible_nodes(for_mod_context, string_name):
i += 1
# This is a simple way to stop Jedi's dynamic param recursion
# from going wild: The deeper Jedi's in the recursion, the less
# code should be evaluated.
if i * evaluator.dynamic_params_depth > MAX_PARAM_SEARCHES:
return
random_context = evaluator.create_context(for_mod_context, name)
for function_execution in _check_name_for_execution(
evaluator, random_context, compare_node, name, trailer):
found_executions = True
yield function_execution
# If there are results after processing a module, we're probably
# good to process. This is a speed optimization.
if found_executions:
return
def _get_lambda_name(node):
stmt = node.parent
if stmt.type == 'expr_stmt':
first_operator = next(stmt.yield_operators(), None)
if first_operator == '=':
first = stmt.children[0]
if first.type == 'name':
return first.value
return None
def _get_possible_nodes(module_context, func_string_name):
try:
names = module_context.tree_node.get_used_names()[func_string_name]
except KeyError:
return
for name in names:
bracket = name.get_next_leaf()
trailer = bracket.parent
if trailer.type == 'trailer' and bracket == '(':
yield name, trailer
def _check_name_for_execution(evaluator, context, compare_node, name, trailer):
from jedi.evaluate.context.function import FunctionExecutionContext
def create_func_excs():
arglist = trailer.children[1]
if arglist == ')':
arglist = None
args = TreeArguments(evaluator, context, arglist, trailer)
if value_node.type == 'classdef':
created_instance = instance.TreeInstance(
evaluator,
value.parent_context,
value,
args
)
for execution in created_instance.create_init_executions():
yield execution
else:
yield value.get_function_execution(args)
for value in evaluator.goto_definitions(context, name):
value_node = value.tree_node
if compare_node == value_node:
for func_execution in create_func_excs():
yield func_execution
elif isinstance(value.parent_context, FunctionExecutionContext) and \
compare_node.type == 'funcdef':
# Here we're trying to find decorators by checking the first
# parameter. It's not very generic though. Should find a better
# solution that also applies to nested decorators.
params = value.parent_context.get_params()
if len(params) != 1:
continue
values = params[0].infer()
nodes = [v.tree_node for v in values]
if nodes == [compare_node]:
# Found a decorator.
module_context = context.get_root_context()
execution_context = next(create_func_excs())
for name, trailer in _get_possible_nodes(module_context, params[0].string_name):
if value_node.start_pos < name.start_pos < value_node.end_pos:
random_context = evaluator.create_context(execution_context, name)
iterator = _check_name_for_execution(
evaluator,
random_context,
compare_node,
name,
trailer
)
for function_execution in iterator:
yield function_execution
| [
"[email protected]"
] | |
b0da7bdba534730f35505b2301bd30a30bf8b8a2 | 26192962dc2627e7ca5f0e3b249c3fabcf52442c | /Python/AD-HOC/1196 - WERTYU.py | f1d867b9f14a29527c0d7a750ed75bcb36716f79 | [] | no_license | PierreVieira/URI | 77278ccb1724ca206ab2c12afbea1e51fa08ff73 | c1eb211c788d26b5cb9bedf5dda4147a2961fa19 | refs/heads/master | 2023-04-10T07:03:13.954639 | 2023-03-22T00:18:28 | 2023-03-22T00:18:28 | 189,321,748 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | """
Author: Pierre Vieira
Submission date: 02/02/2020 16:48:12
"""
linha = "`1234567890-=QWERTYUIOP[]\\ASDFGHJKL;'ZXCVBNM,.'"
while True:
s = ''
try:
frase = input()
except EOFError:
break
else:
for c in frase:
if c == ' ':
s += c
else:
s += linha[linha.find(c)-1]
print(s)
| [
"[email protected]"
] | |
734bba3ac3df513251e2431b420b08c3a0bb20f7 | c2643fdff3185b659c2c7fa807d8b8d345a90343 | /tests/test_basic.py | 4bea68de088fd5206824e30ac834120108554bc5 | [
"BSD-2-Clause"
] | permissive | auxten/fhost | b39ae209a056b301e737d176f8f12dcafd82cfa2 | 6536c4955e13fd67c939a6fc6cc687d29e976d15 | refs/heads/master | 2021-01-16T00:35:43.304418 | 2012-06-25T10:17:52 | 2012-06-25T10:17:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | #
## BEGIN LICENSE BLOCK
#
# Copyright (c) <2012>, Raul Perez <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
## END LICENSE BLOCK
#
import context
import unittest
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_absolute_truth_and_meaning(self):
assert True
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
3ce934caaa6e0a49902a84d3e6ce84ac3d1aac37 | 5cb8df4d10cd1a1d77f227ea8e1b311744750d5b | /generate.py | b4ba55cf4e1d1accfe70b88346848e422bbf65cf | [
"CC0-1.0"
] | permissive | YoonGenwu/hearthstonejson | 388d46c5c082cde8389bef1011dded7d46fea7dc | 3d6709f99dc7d0c0b75ccf441cfebec00f48a184 | refs/heads/master | 2021-01-15T11:42:57.006639 | 2016-02-17T01:12:14 | 2016-02-17T01:12:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,707 | py | #!/usr/bin/env python
import os
import json
import sys
from argparse import ArgumentParser
from enum import IntEnum
from hearthstone.dbf import Dbf
from hearthstone.cardxml import load
from hearthstone.enums import CardType, Faction, GameTag, Locale, LOCALIZED_TAGS
MECHANICS_TAGS = [
GameTag.ADJACENT_BUFF,
GameTag.AURA,
GameTag.BATTLECRY,
GameTag.CHARGE,
GameTag.COMBO,
GameTag.DEATHRATTLE,
GameTag.DIVINE_SHIELD,
GameTag.ENRAGED,
GameTag.FORGETFUL,
GameTag.FREEZE,
GameTag.INSPIRE,
GameTag.MORPH,
GameTag.OVERLOAD,
GameTag.POISONOUS,
GameTag.SECRET,
GameTag.SILENCE,
GameTag.STEALTH,
GameTag.SPELLPOWER,
GameTag.TAG_ONE_TURN_EFFECT,
GameTag.TAUNT,
GameTag.TREASURE,
GameTag.WINDFURY,
GameTag.ImmuneToSpellpower,
GameTag.InvisibleDeathrattle,
]
def json_dump(obj, filename, pretty=False):
print("Writing to %r" % (filename))
if pretty:
kwargs = {"sort_keys": True, "indent": "\t", "separators": (",", ": ")}
else:
kwargs = {"separators": (",", ":")}
with open(filename, "w", encoding="utf8") as f:
json.dump(obj, f, ensure_ascii=False, **kwargs)
def show_field(card, k, v):
if k == "cost" and card.type not in (CardType.ENCHANTMENT, CardType.HERO):
return True
if k == "faction" and v == Faction.NEUTRAL:
return False
if k == "attack" and card.type in (CardType.MINION, CardType.WEAPON):
return True
if k == "health" and card.type in (CardType.MINION, CardType.HERO):
return True
if k == "durability" and card.type == CardType.WEAPON:
return True
return bool(v)
def get_mechanics(card):
ret = []
for tag in MECHANICS_TAGS:
value = card.tags.get(tag, 0)
if value:
ret.append(tag.name)
return ret
TAG_NAMES = {
GameTag.CARDNAME: "name",
GameTag.FLAVORTEXT: "flavortext",
GameTag.CARDTEXT_INHAND: "text",
GameTag.CardTextInPlay: "textInPlay",
GameTag.HOW_TO_EARN: "howToEarn",
GameTag.HOW_TO_EARN_GOLDEN: "howToEarnGolden",
GameTag.TARGETING_ARROW_TEXT: "targetingArrowText",
}
def serialize_card(card):
ret = {
"id": card.id,
"name": card.name,
"flavor": card.flavortext,
"text": card.description,
"textInPlay": card.playtext,
"howToEarn": card.how_to_earn,
"howToEarnGolden": card.how_to_earn_golden,
"targetingArrowText": card.targeting_arrow_text,
"artist": card.artist,
"faction": card.faction,
"playerClass": card.card_class,
"race": card.race,
"rarity": card.rarity,
"set": card.card_set,
"type": card.type,
"collectible": card.collectible,
"attack": card.atk,
"cost": card.cost,
"durability": card.durability,
"health": card.health,
}
ret = {k: v for k, v in ret.items() if show_field(card, k, v)}
for k, v in ret.items():
if isinstance(v, IntEnum):
ret[k] = v.name
mechanics = get_mechanics(card)
if mechanics:
ret["mechanics"] = mechanics
if card.entourage:
ret["entourage"] = card.entourage
if card.requirements:
ret["playRequirements"] = {k.name: v for k, v in card.requirements.items()}
if card.craftable:
ret["dust"] = card.crafting_costs + card.disenchant_costs
# if card.choose_cards:
# ret["chooseCards"] = card.choose_cards
return ret
def export_cards_to_file(cards, filename, locale):
ret = []
for card in cards:
card.locale = locale
ret.append(serialize_card(card))
json_dump(ret, filename)
def export_all_locales_cards_to_file(cards, filename):
ret = []
for card in cards:
obj = serialize_card(card)
for tag in LOCALIZED_TAGS:
if tag in TAG_NAMES:
value = card._localized_tags[tag]
if value:
obj[TAG_NAMES[tag]] = value
ret.append(obj)
json_dump(ret, filename)
def write_cardbacks(dbf, filename, locale):
ret = []
for record in dbf.records:
ret.append({
"id": record["ID"],
"note_desc": record["NOTE_DESC"],
"source": record["SOURCE"],
"enabled": record["ENABLED"],
"name": record.get("NAME", {}).get(locale.name, ""),
"prefab_name": record.get("PREFAB_NAME", ""),
"description": record.get("DESCRIPTION", {}).get(locale.name, ""),
"source_description": record.get("SOURCE_DESCRIPTION", {}).get(locale.name, ""),
})
json_dump(ret, filename)
def main():
parser = ArgumentParser()
parser.add_argument(
"-o", "--output-dir",
type=str,
dest="output_dir",
default="out",
help="Output directory"
)
parser.add_argument(
"-i", "--input-dir",
type=str,
dest="input_dir",
default="hs-data",
help="Input hs-data directory"
)
args = parser.parse_args(sys.argv[1:])
db, xml = load(os.path.join(args.input_dir, "CardDefs.xml"))
dbf_path = os.path.join(args.input_dir, "DBF", "CARD_BACK.xml")
if not os.path.exists(dbf_path):
print("Skipping card back generation (%s does not exist)" % (dbf_path))
dbf = None
else:
dbf = Dbf.load(dbf_path)
cards = db.values()
collectible_cards = [card for card in cards if card.collectible]
for locale in Locale:
if locale.unused:
continue
basedir = os.path.join(args.output_dir, locale.name)
if not os.path.exists(basedir):
os.makedirs(basedir)
filename = os.path.join(basedir, "cards.json")
export_cards_to_file(cards, filename, locale.name)
filename = os.path.join(basedir, "cards.collectible.json")
export_cards_to_file(collectible_cards, filename, locale.name)
if dbf is not None:
filename = os.path.join(basedir, "cardbacks.json")
write_cardbacks(dbf, filename, locale)
# Generate merged locales
basedir = os.path.join(args.output_dir, "all")
if not os.path.exists(basedir):
os.makedirs(basedir)
filename = os.path.join(basedir, "cards.json")
export_all_locales_cards_to_file(cards, filename)
filename = os.path.join(basedir, "cards.collectible.json")
export_all_locales_cards_to_file(collectible_cards, filename)
if __name__ == "__main__":
main()
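# Example invocation (editor's note, based on the argparse defaults above):
#   python generate.py --input-dir hs-data --output-dir out
# This writes per-locale files such as out/<locale>/cards.json and
# out/<locale>/cards.collectible.json, plus merged files under out/all/.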
| [
"[email protected]"
] | |
1e1c3159a79488453e4810b9362f7850f72e9c90 | f68eda51246c95597def569224f3b56d4c3700e7 | /top/api/rest/SellercenterUserPermissionsGetRequest.py | a3f561db414e9ebc103b8c2d04ac8c7b445babb9 | [
"MIT",
"BSD-3-Clause"
] | permissive | stoensin/taobao-openapi | 47de8fb29ae2d8ce47d4fce07c0ccaeaee1ef91f | 202a9df2085229838541713bd24433a90d07c7fc | refs/heads/main | 2023-07-17T02:17:51.527455 | 2021-08-25T15:08:49 | 2021-08-25T15:08:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | '''
Created by auto_sdk on 2018.07.25
'''
from top.api.base import RestApi
class SellercenterUserPermissionsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.nick = None
def getapiname(self):
return 'taobao.sellercenter.user.permissions.get'
| [
"[email protected]"
] | |
1daefdaaf3cdc9dbbd4d888acd5c05d94d6285dd | 85c337f0364f1452c068b7e93421b3e24af85358 | /MzManage/manage.py | 362fb7bb3d7af3d8d0dfab2d09b3c4fb6b0b78a7 | [] | no_license | hornLK/AuthSystemWeb | 9518f23453f910e17c516db26ea3a00fe0d0c806 | c2c03ff2133151889a2ecc205a753a0eb2bbfd91 | refs/heads/master | 2022-12-14T19:18:00.560077 | 2018-04-19T12:39:14 | 2018-04-19T12:39:14 | 130,317,561 | 0 | 0 | null | 2022-12-08T00:59:04 | 2018-04-20T06:17:08 | JavaScript | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MzManage.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
347502a5063ca3f7fdbb96e81aadf62f71a48dae | 97e534b26a76bf0d954e166841179979748bcfa2 | /objects/migrations/0046_auto_20180625_0823.py | d6855e81eb891d0362368b4d406690be5fbde2c7 | [] | no_license | mehdi1361/http_server | 3a8bd73ce44307ee2b7761d1211671ca8cb0f3ba | d8a962c55165ef0237bfb26d27d9cfa11a415a5d | refs/heads/develop | 2022-12-11T00:44:11.089407 | 2019-01-20T12:02:48 | 2019-01-20T12:02:48 | 166,656,299 | 0 | 0 | null | 2022-12-07T23:53:22 | 2019-01-20T12:02:05 | HTML | UTF-8 | Python | false | false | 958 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-25 08:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('objects', '0045_auto_20180625_0724'),
]
operations = [
migrations.AddField(
model_name='league',
name='play_off_start_gem_1',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='play off start gem 1'),
),
migrations.AddField(
model_name='league',
name='play_off_start_gem_2',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='play off start gem 2'),
),
migrations.AlterField(
model_name='league',
name='play_off_start_gem',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='play off start gem '),
),
]
| [
"[email protected]"
] | |
914e5a276b7849b267a4458ca7c0afd16ec3f18e | 3f73ce74b6fdfb7966abb71a98f4986edd727c5f | /lib/config.py | 9d9e5784d61265a408685b6fae7a08e8e51d01e0 | [
"MIT"
] | permissive | yuta-komura/amateras | 9c2efd310b18f159b1354864d65f9894ab93737f | cf8cc8fe0b5d8c382090fd1784a3ce96e6953157 | refs/heads/master | 2023-01-21T19:57:18.763894 | 2020-11-25T04:02:28 | 2020-11-25T04:02:28 | 297,432,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from enum import Enum
PROJECT_DIR = __file__.replace("/lib/config.py", "")
class HistoricalPrice(Enum):
TIME_FRAME = 60 # minutes
CHANNEL_WIDTH = 67
class DATABASE(Enum):
class TRADINGBOT(Enum):
HOST = "*********"
USER = "*********"
PASSWORD = "*********"
DATABASE = "*********"
class Bitflyer(Enum):
class Api(Enum):
KEY = "*********"
SECRET = "*********"
class DirPath(Enum):
PROJECT = PROJECT_DIR
class FilePath(Enum):
WARNING_MP3 = PROJECT_DIR + "/sound/WARNING.mp3"
ERROR_MP3 = PROJECT_DIR + "/sound/ERROR.mp3"
SYSTEM_LOG = PROJECT_DIR + "/log/system.log"
AA = PROJECT_DIR + "/document/AA.txt"
| [
"[email protected]"
] | |
75434b093211de8bd09ddd5d42a9bf15f06d16c6 | 77116b044adb3f28c5ea53d17fc69c29fd9bee55 | /modules/influxdb_wrapper.py | 683fcb41dd50d91836b1b24a3421205c11cc4a99 | [
"MIT"
] | permissive | manav1403/stopstalk-deployment | 63a5c22f20cf1dbe81024ba63b33c1c986ae8ada | 667f6d89b24ce04595e2c70e02aa44aa3d836c42 | refs/heads/master | 2023-03-22T18:39:37.371341 | 2021-03-20T15:40:20 | 2021-03-20T15:40:20 | 290,265,152 | 0 | 0 | MIT | 2020-08-25T16:22:59 | 2020-08-25T16:22:58 | null | UTF-8 | Python | false | false | 2,960 | py | """
Copyright (c) 2015-2020 Raj Patel([email protected]), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from influxdb import SeriesHelper
from gluon import current
series_helper_classes = {}
# ------------------------------------------------------------------------------
def get_series_helper(measurement_name,
measurement_fields,
measurement_tags):
if measurement_name in series_helper_classes:
return series_helper_classes[measurement_name]
else:
series_helper_classes[measurement_name] = series_helper_class_wrapper(
measurement_name,
measurement_fields,
measurement_tags
)
return series_helper_classes[measurement_name]
# ------------------------------------------------------------------------------
def series_helper_class_wrapper(measurement_name,
measurement_fields,
measurement_tags):
class StopStalkSeriesHelper(SeriesHelper):
"""Instantiate SeriesHelper to write points to the backend."""
class Meta:
"""Meta class stores time series helper configuration."""
# The client should be an instance of InfluxDBClient.
client = current.INFLUXDB_CLIENT
# The series name must be a string. Add dependent fields/tags
# in curly brackets.
series_name = measurement_name
# Defines all the fields in this time series.
fields = measurement_fields
# Defines all the tags for the series.
tags = measurement_tags
# Defines the number of data points to store prior to writing
# on the wire.
bulk_size = 5
# autocommit must be set to True when using bulk_size
autocommit = True
return StopStalkSeriesHelper | [
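# Example usage (editor's sketch; the measurement name, fields and tags are
# hypothetical, and current.INFLUXDB_CLIENT must be configured first):
#
#     SubmissionSeries = get_series_helper(
#         'submission_count',
#         measurement_fields=['value'],
#         measurement_tags=['site'],
#     )
#     SubmissionSeries(site='codeforces', value=1)  # buffered; flushed every bulk_size points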
"[email protected]"
] | |
5901e5381b54af17773dc3e7c1520e28cf0de3f4 | 2cb507ecd6629b9ff457a36e462f987913d94c1a | /webspider/3.数据提取/3.3bs4模块/07-bs4案例.py | e0ba2dc15a95fa18c7b4907a3e13a505e0e05098 | [
"Apache-2.0"
] | permissive | youaresherlock/PythonPractice | 6869e0a5949675198826e5a07552237a636d6f5b | 2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | refs/heads/master | 2021-08-16T03:09:44.203035 | 2021-08-02T07:40:00 | 2021-08-02T07:40:00 | 146,625,560 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #!usr/bin/python
# -*- coding:utf8 -*-
"""
https://mil.news.sina.com.cn/roll/index.d.html
"""
import json
import requests
from bs4 import BeautifulSoup
url = 'https://mil.news.sina.com.cn/roll/index.d.html'
headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content.decode(), 'html.parser')
# Descendant (hierarchical) CSS selector
news_list = soup.select('.linkNews li a')
news_results = []
for news in news_list:
new_dict = dict()
new_dict['title'] = news.get_text()
new_dict['url'] = news.get('href')
news_results.append(new_dict)
print(news_results)
with open('news.json', 'w') as f:
content = json.dumps(news_results, ensure_ascii=False, indent=1)
f.write(content)
| [
"[email protected]"
] | |
3f9c5087daf02fa4d3f63eed410bf3cac7690a7a | 5936b0f025944d265cc64d31ef93bc578d5ae6a2 | /home/migrations/0002_load_initial_data.py | aa10fce4bdfc1b079fe4363502f83665c2758cfe | [] | no_license | crowdbotics-apps/smiley-18358 | b4e91ddeaf525aedf990ec1df65d65fb583f4b7c | 7935dd2fad196a7b573c1126905af5fcf93110b0 | refs/heads/master | 2022-11-06T07:02:33.512245 | 2020-06-23T20:08:09 | 2020-06-23T20:08:09 | 274,497,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "smiley"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">smiley</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "smiley-18358.botics.co"
site_params = {
"name": "smiley",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
6d3e6d6192178fdbd567a66120eb0aeb0b1077a1 | a281d09ed91914b134028c3a9f11f0beb69a9089 | /contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_stellar_address.py | e90bce385fe104a9ad05fb1b06683e925a02a2db | [
"Apache-2.0"
] | permissive | CarstenFrommhold/great_expectations | 4e67bbf43d21bc414f56d576704259a4eca283a5 | 23d61c5ed26689d6ff9cec647cc35712ad744559 | refs/heads/develop | 2023-01-08T10:01:12.074165 | 2022-11-29T18:50:18 | 2022-11-29T18:50:18 | 311,708,429 | 0 | 0 | Apache-2.0 | 2020-11-10T15:52:05 | 2020-11-10T15:52:04 | null | UTF-8 | Python | false | false | 6,125 | py | """
This is a template for creating custom ColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations
"""
import json
from typing import Optional
import coinaddrvalidator
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_stellar_address(addr: str) -> bool:
try:
res = coinaddrvalidator.validate("xlm", addr).valid
return res
except Exception as e:
return False
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidStellarAddress(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_stellar_address"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_stellar_address(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidStellarAddress(ColumnMapExpectation):
"""Expect column values to be valid Stellar address"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"GA7YNBW5CBTJZ3ZZOWX3ZNBKD6OE7A7IHUQVWMY62W2ZBG2SGZVOOPVH",
"GBTA54J4LY5BAQWA4KECII66TPTU3V6DXPBPNVXIPMHN5W6QFATWRXY5",
"GCINDD6LNZSYPND4WRQL6NRFGOAXMAMK7M3QP2JXWC5634BY4DSZ4YG2",
"GDKRCHSD2YUW3X6FXRAVOOZZ2IOMWSGM6SH6I56VCX6V2DTPG7FO626W",
],
"some_other": [
"1BoatSLRHtKNngkdXEeobR76b53LETtpyT",
"n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ",
"3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC",
"bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_stellar_address"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
return True
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["coinaddrvalidator"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidStellarAddress().print_diagnostic_checklist()
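# Example usage (editor's sketch; assumes this module has been imported so the
# expectation is registered, and that `validator` is a Great Expectations
# validator over a pandas dataset):
#
#     validator.expect_column_values_to_be_valid_stellar_address("wallet_address")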
| [
"[email protected]"
] | |
0a84c7d2819c6909abef3aa8cf9c8e577efad451 | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /nn_ns/parsing/IterParser/ParseResultAST.py | 205e7e97ea49432cf019048a7b1198e730ce036c | [] | no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 346 | py |
from sand.types.NonMathTree import NonMathTree, LeafNode, OrientedNode, UnorientedNode
class ParseResultAST(NonMathTree):
class __UnboxedTypeID__:pass
class ConRuleNode(OrientedNode, ParseResultAST):pass
class AltRuleNode(UnorientedNode, ParseResultAST):pass
class TerminalNode(LeafNode, ParseResultAST):pass
| [
"[email protected]"
] | |
5793547e4f2688f451442dce1b433dfd365ef5a8 | 9715a7d27f9b146632f964b643ee7243a7e9a38c | /match-sift.py | b7275f8f1c0e357c2af2b24419bc14fbb03ef725 | [] | no_license | uakfdotb/skyquery | 3eb9b2265992127a3c5b3b3612c32ddea0f39195 | dc67b98ee8034711c274408640e3582d20482673 | refs/heads/master | 2020-07-07T03:49:57.856424 | 2019-08-21T19:13:18 | 2019-08-21T19:13:18 | 203,237,682 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,632 | py | from discoverlib import geom, grid_index
import get_db
import cv2
import json
import math
import multiprocessing
import numpy
import os
from PIL import Image
import scipy.ndimage
import sys
video_id = int(sys.argv[1])
db = get_db.get_db()
BASE_PATH = 'ortho-masked.jpg'
FRAME_PATH = 'frames/{}/'.format(video_id)
LK_PARAMETERS = dict(winSize=(21, 21), maxLevel=2, criteria=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 30, 0.01))
# MAX_SPEED is in orthoimagery pixels per frame: the source imagery was 2 cm/pixel,
# resized here to 4 cm/pixel, and time is measured in video frames.
MAX_SPEED = 75
sift = cv2.xfeatures2d.SIFT_create()
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_L1)
base_im = scipy.ndimage.imread(BASE_PATH)
base_keypoints, base_desc = sift.detectAndCompute(base_im, None)
index = grid_index.GridIndex(256)
for i, kp in enumerate(base_keypoints):
p = geom.Point(kp.pt[0], kp.pt[1])
index.insert(p, i)
def points_to_poly_str(points):
strs = ['{},{}'.format(points[j, 0], points[j, 1]) for j in xrange(points.shape[0])]
return ' '.join(strs)
def homography_from_flow(prev_homography, prev_gray, cur_gray):
positions = []
for i in xrange(0, prev_gray.shape[0]-50, 50):
for j in xrange(0, prev_gray.shape[1]-50, 50):
positions.append((i, j))
positions_np = numpy.array(positions, dtype='float32').reshape(-1, 1, 2)
    def flip_pos(positions):
        # positions are stored as (row, col) = (y, x); OpenCV expects (x, y).
        return numpy.stack([positions[:, :, 1], positions[:, :, 0]], axis=2)
next_positions, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, cur_gray, flip_pos(positions_np), None, **LK_PARAMETERS)
if next_positions is None:
return None
next_positions = flip_pos(next_positions)
differences = next_positions[:, 0, :] - positions_np[:, 0, :]
differences_okay = differences[numpy.where(st[:, 0] == 1)]
median = [numpy.median(differences_okay[:, 0]), numpy.median(differences_okay[:, 1])]
good = (numpy.square(differences[:, 0] - median[0]) + numpy.square(differences[:, 1] - median[1])) < 16
if float(numpy.count_nonzero(good)) / differences.shape[0] < 0.7:
return None
# translate previous homography based on the flow result
translation = [numpy.median(differences[:, 0]), numpy.median(differences[:, 1])]
H_translation = numpy.array([[1, 0, -translation[1]], [0, 1, -translation[0]], [0,0,1]], dtype='float32')
return prev_homography.dot(H_translation)
frame_idx_to_fname = {}
for fname in os.listdir(FRAME_PATH):
if '.jpg' not in fname:
continue
frame_idx = int(fname.split('.jpg')[0])
frame_idx_to_fname[frame_idx] = fname
prev_bounds = None
prev_frame, prev_gray = None, None
prev_homography = None
prev_counter = 0
#db.execute("SELECT id, idx FROM video_frames WHERE bounds IS NULL AND video_id = %s ORDER BY idx", [video_id])
db.execute("SELECT id, idx FROM video_frames WHERE video_id = %s ORDER BY idx", [video_id])
for row in db.fetchall():
#while True:
# db.execute("SELECT id, idx FROM video_frames WHERE bounds IS NULL AND video_id = %s ORDER BY RAND() LIMIT 1", [video_id])
# rows = db.fetchall()
# if len(rows) != 1:
# break
# row = rows[0]
frame_id, frame_idx = row
frame_fname = frame_idx_to_fname[frame_idx]
print 'process {}'.format(frame_idx)
frame = scipy.ndimage.imread(FRAME_PATH + frame_fname)
frame = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2))
frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
H = None
if prev_homography is not None and prev_counter < 5:
H = homography_from_flow(prev_homography, prev_gray, frame_gray)
prev_counter += 1
if H is None:
keypoints, desc = sift.detectAndCompute(frame, None)
if prev_bounds is None:
query_keypoints, query_desc = base_keypoints, base_desc
else:
indices = index.search(prev_bounds.add_tol(2*MAX_SPEED))
indices = numpy.array(list(indices), dtype='int32')
query_keypoints = []
for i in indices:
query_keypoints.append(base_keypoints[i])
query_desc = base_desc[indices]
matches = matcher.knnMatch(queryDescriptors=query_desc, trainDescriptors=desc, k=2)
good = []
for m, n in matches:
if m.distance < 0.6*n.distance:
good.append(m)
src_pts = numpy.float32([keypoints[m.trainIdx].pt for m in good]).reshape(-1,1,2)
dst_pts = numpy.float32([query_keypoints[m.queryIdx].pt for m in good]).reshape(-1,1,2)
try:
H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
except Exception as e:
print 'warning: exception on frame {}: {}'.format(frame_idx, e)
db.execute("UPDATE video_frames SET bounds = '' WHERE id = %s", [frame_id])
prev_bounds = None
continue
prev_counter = 0
if H is None:
db.execute("UPDATE video_frames SET bounds = '' WHERE id = %s", [frame_id])
prev_bounds = None
continue
bound_points = numpy.array([
[0, 0],
[frame.shape[1], 0],
[frame.shape[1], frame.shape[0]],
[0, frame.shape[0]],
], dtype='float32').reshape(-1, 1, 2)
transformed_points = cv2.perspectiveTransform(bound_points, H)
bounds = None
for p in transformed_points[:, 0, :]:
p = geom.Point(p[0], p[1])
if bounds is None:
bounds = p.bounds()
else:
bounds = bounds.extend(p)
print bounds
if prev_bounds is not None:
intersection_area = float(bounds.intersection(prev_bounds).area())
union_area = float(bounds.area() + prev_bounds.area()) - intersection_area
iou = intersection_area / union_area
if iou < 0.6:
print 'iou failed! ({})'.format(iou)
print bounds, prev_bounds
db.execute("UPDATE video_frames SET bounds = '' WHERE id = %s", [frame_id])
prev_bounds = None
continue
poly_str = points_to_poly_str(transformed_points[:, 0, :])
db.execute("UPDATE video_frames SET bounds = %s WHERE id = %s", [poly_str, frame_id])
prev_bounds, prev_frame, prev_gray, prev_homography = bounds, frame, frame_gray, H
# transform detections
db.execute(
"SELECT id, frame_polygon FROM detections WHERE frame_id = %s AND polygon IS NULL",
[frame_id]
)
points = []
detections = []
for row in db.fetchall():
poly_parts = row[1].split(' ')
poly_points = []
for part in poly_parts:
point_parts = part.split(',')
poly_points.append((int(point_parts[0])/2, int(point_parts[1])/2))
detections.append((int(row[0]), len(poly_points)))
points.extend(poly_points)
if len(points) > 0:
points = numpy.array(points, dtype='float32').reshape(-1, 1, 2)
transformed_points = cv2.perspectiveTransform(points, H)
i = 0
for detection_id, num_points in detections:
poly_str = points_to_poly_str(transformed_points[i:i+num_points, 0, :])
db.execute("UPDATE detections SET polygon = %s WHERE id = %s", [poly_str, detection_id])
print poly_str, detection_id
i += num_points
assert i == transformed_points.shape[0]
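# --- Editor's sketch (added; not part of the original script) ---
# Minimal illustration of the homography round-trip used above: estimate H
# from point correspondences, then map new points with
# cv2.perspectiveTransform. The correspondences here are synthetic; in the
# script they come from SIFT matches.
def _homography_demo():
    src = numpy.float32([[0, 0], [100, 0], [100, 100], [0, 100]]).reshape(-1, 1, 2)
    dst = src + 10  # pretend the base imagery is shifted by (10, 10) pixels
    H, _ = cv2.findHomography(src, dst)  # least-squares fit; needs 4+ points
    pt = numpy.float32([[50, 50]]).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(pt, H)  # ~ [[[60, 60]]]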
| [
"[email protected]"
] | |
f5557d5ff6492966343a1b46c76dde955a03f5a7 | b15a9d9c7374c4a1fa5ec3ef63603a8c57e8681f | /Design-Patterns-Python/memento/caretaker.py | 6a143d567f4390a284f8bff63c8f3a579f175f00 | [] | no_license | gohils/zemr_notebook | 3f7490ef7a2559655746c3e2e0dbfb835a83891e | 00d53cea9970df44160c51e6ad2bdeadfae2c91f | refs/heads/master | 2023-08-04T14:32:35.428016 | 2023-07-20T11:51:08 | 2023-07-20T11:51:08 | 222,027,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | "The Save/Restore Game functionality"
class CareTaker():
"Guardian. Provides a narrow interface to the mementos"
def __init__(self, originator):
self._originator = originator
self._mementos = []
def save(self):
"Store a new Memento of the Characters current state"
print("CareTaker: Game Save")
memento = self._originator.memento
self._mementos.append(memento)
def restore(self, index):
"""
        Replace the Character's current attributes with the state
stored in the saved Memento
"""
print("CareTaker: Restoring Characters attributes from Memento")
memento = self._mementos[index]
self._originator.memento = memento
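# --- Editor's sketch (added): a hypothetical originator showing how CareTaker
# is driven; it only needs the `memento` property get/set assumed above.
class _Character:
    def __init__(self):
        self.score = 0

    @property
    def memento(self):
        return {"score": self.score}  # snapshot the current state

    @memento.setter
    def memento(self, state):
        self.score = state["score"]   # roll back to a snapshot

# hero = _Character()
# caretaker = CareTaker(hero)
# caretaker.save()         # memento 0: score == 0
# hero.score = 100
# caretaker.restore(0)     # hero.score is 0 again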
| [
"[email protected]"
] | |
4b94ea0efb14d60e69e0110fd84977c9ba7a7611 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/virtual_machine_scale_set_public_ip_address_configuration_py3.py | 76a82b78db8773b9a74688ddbdadeac51ed6ec07 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,197 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetPublicIPAddressConfiguration(Model):
"""Describes a virtual machines scale set IP Configuration's PublicIPAddress
configuration.
All required parameters must be populated in order to send to Azure.
:param name: Required. The publicIP address configuration name.
:type name: str
:param idle_timeout_in_minutes: The idle timeout of the public IP address.
:type idle_timeout_in_minutes: int
:param dns_settings: The dns settings to be applied on the publicIP
        addresses.
:type dns_settings:
~azure.mgmt.compute.v2018_04_01.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
:param ip_tags: The list of IP tags associated with the public IP address.
:type ip_tags:
list[~azure.mgmt.compute.v2018_04_01.models.VirtualMachineScaleSetIpTag]
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
'ip_tags': {'key': 'properties.ipTags', 'type': '[VirtualMachineScaleSetIpTag]'},
}
def __init__(self, *, name: str, idle_timeout_in_minutes: int=None, dns_settings=None, ip_tags=None, **kwargs) -> None:
super(VirtualMachineScaleSetPublicIPAddressConfiguration, self).__init__(**kwargs)
self.name = name
self.idle_timeout_in_minutes = idle_timeout_in_minutes
self.dns_settings = dns_settings
self.ip_tags = ip_tags
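# --- Editor's note (added): usage sketch, not part of the generated SDK file.
# Only "name" is required; dns_settings/ip_tags take the companion generated
# model types:
#
#   ip_config = VirtualMachineScaleSetPublicIPAddressConfiguration(
#       name="pip-config",
#       idle_timeout_in_minutes=10,
#   )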
| [
"[email protected]"
] | |
4673777d1c1a994069de18c0acda79831f581168 | 611055f18da392e5a63b2d80ce102701201981eb | /src/apps/comentarios/admin.py | 52f74fce4df52fd09dd0fe7013e06fc2089b1463 | [] | no_license | danielhuamani/django-backbone | facf6f2ced78991577957bd2f8bb8c42255cd795 | 6523e19d8599753ccf28b6a2d4f511ec0fe0f1c7 | refs/heads/master | 2021-01-10T12:47:26.514543 | 2015-11-18T17:12:02 | 2015-11-18T17:12:02 | 45,657,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from django.contrib import admin
from .models import Comentario
# Register your models here.
admin.site.register(Comentario)
| [
"[email protected]"
] | |
56f8397cd80c31bf0258a6c8726c43dfa3581ba0 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5688567749672960_1/Python/Jakube/A.py | 4b619d8aaec440fa7678ace44a08aae319de1d8e | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | def splitter(number):
s = str(number)
return int(s[:len(s)//2] or "0"), int(s[len(s)//2:]), len(s[len(s)//2:])
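# --- Editor's note (added): splitter() cuts the decimal string in half and
# also returns the length of the right half, e.g.
#   splitter(1234) -> (12, 34, 2)
#   splitter(7)    -> (0, 7, 1)    # empty left half falls back to "0"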
def compute(number):
steps = 0
while number:
# get second part of the number
half1, half2, l = splitter(number)
if half2 == 0:
steps += 1
number -= 1
half1, half2, l = splitter(number)
steps += half2 - 1
        number -= half2 - 1
number = half1 * 10**l + 1
if number == 1:
return steps + 1
# switch
if str(number) != str(number)[::-1]:
number = int(str(number)[::-1])
steps += 1
mi = int(str(number)[1:] or str(number))
number -= mi
steps += mi
def read_number(f):
return int(f.readline().strip())
def main():
with open('A-large.in', 'r') as f:
test_cases = read_number(f)
for test_case in range(test_cases):
number = read_number(f)
#print(number)
print('Case #{}: {}'.format(test_case + 1, compute(number)))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
76347a0bc807d2e3b00e30fef2748954370b3171 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn5 - tviti/M-17135-2263.py | 9deedfe8d85e527c7a5c6e89ba8391269f3c8492 | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,569 | py | def unikati(s):
sez = []
for x in s:
if x not in sez:
sez.append(x)
return sez
def avtor(tvit):
a = ""
for x in range(len(tvit)):
if tvit[x] == ":":
break
else:
a += tvit[x]
return a
def izloci_besedo(beseda):
beseda_1 = ""
for x in range(len(beseda)):
        if beseda[x].isalnum():
            beseda_1 += beseda[x]
        elif beseda[x] == "-" and 0 < x < len(beseda) - 1 and beseda[x-1].isalnum() and beseda[x+1].isalnum():
beseda_1 += beseda[x]
return beseda_1
def vsi_avtorji(tviti):
sez = []
for x in tviti:
avtor_ime = avtor(x)
if avtor_ime not in sez:
sez.append(avtor_ime)
return sez
def se_zacne_z(tvit, c):
sez = tvit.split()
sez_besed = []
for x in sez:
if x[0] == c:
if x[-1].isalnum() == True:
sez_besed.append(x[1:])
else:
sez_besed.append(x[1:-1])
return sez_besed
def vse_afne(tviti):
sez_imen = []
for x in tviti:
besede = x.split()
for x in besede:
if x[0] == "@":
if x[-1].isalnum() == True:
if x[1:] not in sez_imen:
sez_imen.append(x[1:])
else:
if x[1:-1] not in sez_imen:
sez_imen.append(x[1:-1])
return sez_imen
def vse_osebe(tviti):
sez = vse_afne(tviti)
sez_imen = vsi_avtorji(tviti)
    for ime in sez:
        if ime not in sez_imen:
            sez_imen.append(ime)
sez = sorted(sez_imen)
return sez
def vsi_hashtagi(tviti):
sez = []
for x in tviti:
besede = x.split()
for x in besede:
if x[0] == "#":
if x[-1].isalnum() == True:
if x[1:] not in sez:
sez.append(x[1:])
else:
if x[1:-1] not in sez:
sez.append(x[1:-1])
return sez
def zberi_se_zacne_z(tviti, c):
sez_besed = []
for x in tviti:
sez = x.split()
for x in sez:
if x[0] == c:
if x[-1].isalnum() == True:
if x[1:] not in sez_besed:
sez_besed.append(x[1:])
else:
if x[1:-1] not in sez_besed:
sez_besed.append(x[1:-1])
return sez_besed
def custva(tviti, hashtagi):
sez_imen = []
for x in tviti:
sez = x.split()
avtor = sez[0][:-1]
for x in sez:
if x[0] == "#":
if x[1:] in hashtagi and avtor not in sez_imen:
sez_imen.append(avtor)
return sorted(sez_imen)
def se_poznata(tviti, oseba1, oseba2):
zakljucek = False
sez = [oseba1, oseba2]
for x in sez:
for y in tviti:
besede = y.split()
for s in besede:
sez_besed = []
if s[0] == "@":
if besede[0][:-1] == x:
if s[-1].isalnum() == True:
if s[1:] not in sez_besed:
sez_besed.append(s[1:])
else:
if s[1:-1] not in sez_besed:
sez_besed.append(s[1:-1])
for d in sez_besed:
if x == oseba1:
if oseba2 in sez_besed:
zakljucek = True
else:
if oseba1 in sez_besed:
zakljucek = True
return zakljucek
import unittest
class TestTviti(unittest.TestCase):
tviti = [
"sandra: Spet ta dež. #dougcajt",
"berta: @sandra Delaj domačo za #programiranje1",
"sandra: @berta Ne maram #programiranje1 #krneki",
"ana: kdo so te @berta, @cilka, @dani? #krneki",
"cilka: jst sm pa #luft",
"benjamin: pogrešam ano #zalosten",
"ema: @benjamin @ana #split? po dvopičju, za začetek?",
]
def test_unikat(self):
self.assertEqual(unikati([1, 2, 1, 1, 3, 2]), [1, 2, 3])
self.assertEqual(unikati([1, 3, 2, 1, 1, 3, 2]), [1, 3, 2])
self.assertEqual(unikati([1, 5, 4, 3, 2]), [1, 5, 4, 3, 2])
self.assertEqual(unikati([1, 1, 1, 1, 1]), [1])
self.assertEqual(unikati([1]), [1])
self.assertEqual(unikati([]), [])
self.assertEqual(unikati(["Ana", "Berta", "Cilka", "Berta"]), ["Ana", "Berta", "Cilka"])
def test_avtor(self):
self.assertEqual(avtor("janez: pred dvopičjem avtor, potem besedilo"), "janez")
self.assertEqual(avtor("ana: malo krajse ime"), "ana")
self.assertEqual(avtor("benjamin: pomembne so tri stvari: prva, druga in tretja"), "benjamin")
def test_vsi_avtorji(self):
self.assertEqual(vsi_avtorji(self.tviti), ["sandra", "berta", "ana", "cilka", "benjamin", "ema"])
self.assertEqual(vsi_avtorji(self.tviti[:3]), ["sandra", "berta"])
def test_izloci_besedo(self):
self.assertEqual(izloci_besedo("@ana"), "ana")
self.assertEqual(izloci_besedo("@@ana!!!"), "ana")
self.assertEqual(izloci_besedo("ana"), "ana")
self.assertEqual(izloci_besedo("!#$%\"=%/%()/Ben-jamin'"), "Ben-jamin")
def test_vse_na_crko(self):
self.assertEqual(se_zacne_z("Benjamin $je $skocil! Visoko!", "$"), ["je", "skocil"])
self.assertEqual(se_zacne_z("Benjamin $je $skocil! #Visoko!", "$"), ["je", "skocil"])
self.assertEqual(se_zacne_z("ana: kdo so te @berta, @cilka, @dani? #krneki", "@"), ["berta", "cilka", "dani"])
def test_zberi_na_crko(self):
self.assertEqual(zberi_se_zacne_z(self.tviti, "@"), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
self.assertEqual(zberi_se_zacne_z(self.tviti, "#"), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
def test_vse_afne(self):
self.assertEqual(vse_afne(self.tviti), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
def test_vsi_hashtagi(self):
self.assertEqual(vsi_hashtagi(self.tviti), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
def test_vse_osebe(self):
self.assertEqual(vse_osebe(self.tviti), ['ana', 'benjamin', 'berta', 'cilka', 'dani', 'ema', 'sandra'])
class TestDodatna(unittest.TestCase):
tviti = [
"sandra: Spet ta dež. #dougcajt",
"berta: @sandra Delaj domačo za #programiranje1",
"sandra: @berta Ne maram #programiranje1 #krneki",
"ana: kdo so te @berta, @cilka, @dani? #krneki",
"cilka: jst sm pa #luft",
"benjamin: pogrešam ano #zalosten",
"ema: @benjamin @ana #split? po dvopičju, za začetek?",
]
def test_custva(self):
self.assertEqual(custva(self.tviti, ["dougcajt", "krneki"]), ["ana", "sandra"])
self.assertEqual(custva(self.tviti, ["luft"]), ["cilka"])
self.assertEqual(custva(self.tviti, ["meh"]), [])
def test_se_poznata(self):
self.assertTrue(se_poznata(self.tviti, "ana", "berta"))
self.assertTrue(se_poznata(self.tviti, "ema", "ana"))
self.assertFalse(se_poznata(self.tviti, "sandra", "ana"))
self.assertFalse(se_poznata(self.tviti, "cilka", "luft"))
self.assertFalse(se_poznata(self.tviti, "cilka", "balon"))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
d0ac595a122ecd472ef080d0b8bd510635b637ea | 6fab6422c26e00cde21f51f8f10eb88ff5c458af | /api/serializers.py | 51f7198a3741d3245a04b600aeef1d4bc543c61a | [] | no_license | nicksonlangat/alzy-api | ffa3f43198fa0a6e8f58b88ae3f206e4c69f6cfb | d4a95da469d0895eb0c8a2897f3927e61da89aa9 | refs/heads/master | 2023-02-28T08:36:15.600769 | 2021-02-09T22:37:03 | 2021-02-09T22:37:03 | 337,120,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from django.contrib.auth.models import User
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password' : {'write_only': True, 'required': True}}
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
return user
class ReminderSerializer(serializers.ModelSerializer):
class Meta:
model = Reminder
fields = ('id', 'title','details','deadline',)
    def create(self, validated_data):  # override the built-in create()
# create new instance of the model
reminder=Reminder.objects.create(**validated_data)
return reminder
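# --- Editor's note (added): illustrative round-trip through the serializer
# above; field values are hypothetical and depend on the Reminder model:
#
#   serializer = ReminderSerializer(data={"title": "Standup",
#                                         "details": "Daily sync",
#                                         "deadline": "2021-03-01T09:00:00Z"})
#   if serializer.is_valid():
#       reminder = serializer.save()   # dispatches to create() above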
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = File
fields = "__all__" | [
"[email protected]"
] | |
ee5d9088a648e83c220c2dc7e4f83db84f9ab93e | f02e654d5590a861804e3220ed76ba2192e1699b | /aslam/deprecated/ASLAM/deprecated/old2/test.py | bdc9562460aa075503b52776c3db9d3ae345080c | [
"MIT",
"BSD-3-Clause"
] | permissive | AmarNathH/software | 73e2afd3affaf2c1595b406480edac8b8fb2fcac | e225810c7501250f48add43349a64f49450cc79f | refs/heads/master | 2020-12-02T20:50:18.439874 | 2017-07-03T16:51:07 | 2017-07-03T16:51:07 | 96,219,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python2.7
from classes import *
import numpy as n
S = State(5, 1, 5, 1)
for x in range(5): S.objects[str(x)] = Object()
S.update()
S.hObs('0', 45, 5)
S.dObs('0', 10**(1./2), 0.5)
S.update()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
pmap = S.objects['0'].pmap
xv, yv, zv = [], [], []
for x in range(len(pmap)):
for y in range(len(pmap[0])):
xv += [x / GRIDSCALE]
yv += [y / GRIDSCALE]
zv += [pmap[x][y]]
ax.scatter(xv, yv, zv)
plt.show()
#for i in range(len(x)):
# for j in range(len(y)):
| [
"[email protected]"
] | |
1197d22b4092f0070ba99d63e0074573c7e860f4 | 6045f8519065f17b9d832a8e051723a520b58e3c | /ex Basic Sytax/2. Drink Something.py | bc6082c6982ee35b8a65971bc335d24452e1b965 | [] | no_license | a-angeliev/Python-Fundamentals-SoftUni | a308a6c94eb705a3319f6e081543c1cad0b1b37d | a9a5eba0376ebc7395daeda527408d1e59d58316 | refs/heads/master | 2023-07-19T05:55:28.104160 | 2021-09-11T18:25:58 | 2021-09-11T18:25:58 | 399,575,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | n = int(input())
if n<=14:
print("drink toddy")
elif n<=18:
print("drink coke")
elif n<=21:
print("drink beer")
else:
print("drink whisky") | [
"[email protected]"
] | |
f3c46d47d4582718dfb6dd5b01fc9693777fc6bd | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2023_07_01_preview/aio/_dns_management_client.py | 27d21876b1846ae591194de288047cefd6a1b680 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 5,306 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import DnsManagementClientConfiguration
from .operations import DnsResourceReferenceOperations, DnssecConfigsOperations, RecordSetsOperations, ZonesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class DnsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""The DNS Management Client.
:ivar dnssec_configs: DnssecConfigsOperations operations
:vartype dnssec_configs:
azure.mgmt.dns.v2023_07_01_preview.aio.operations.DnssecConfigsOperations
:ivar record_sets: RecordSetsOperations operations
:vartype record_sets: azure.mgmt.dns.v2023_07_01_preview.aio.operations.RecordSetsOperations
:ivar zones: ZonesOperations operations
:vartype zones: azure.mgmt.dns.v2023_07_01_preview.aio.operations.ZonesOperations
:ivar dns_resource_reference: DnsResourceReferenceOperations operations
:vartype dns_resource_reference:
azure.mgmt.dns.v2023_07_01_preview.aio.operations.DnsResourceReferenceOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2023-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = DnsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.dnssec_configs = DnssecConfigsOperations(
self._client, self._config, self._serialize, self._deserialize, "2023-07-01-preview"
)
self.record_sets = RecordSetsOperations(
self._client, self._config, self._serialize, self._deserialize, "2023-07-01-preview"
)
self.zones = ZonesOperations(
self._client, self._config, self._serialize, self._deserialize, "2023-07-01-preview"
)
self.dns_resource_reference = DnsResourceReferenceOperations(
self._client, self._config, self._serialize, self._deserialize, "2023-07-01-preview"
)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "DnsManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
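# --- Editor's note (added): construction sketch, not part of the generated
# file. Any azure-core async token credential works; DefaultAzureCredential
# from the separate azure-identity package is the usual choice:
#
#   from azure.identity.aio import DefaultAzureCredential
#   async with DnsManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#       ...  # e.g. zones = client.zones.list()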
| [
"[email protected]"
] | |
e8bad14d95e08fc8e990e74f3bdf81de17ebc718 | 23b7fa714698be444d82ac649314616495c66235 | /petl/transform/__init__.py | 5dac1f6cd765844d320a78b291999fef24a54ef6 | [
"MIT"
] | permissive | mbelmadani/petl | a38ed1e595157fb556fe86ae32e796f6eff60a7a | b6867f056bf44d699f8f7b8432769e4b5127e937 | refs/heads/master | 2021-04-03T09:04:56.785188 | 2019-08-06T15:09:40 | 2019-08-06T15:09:40 | 124,597,339 | 0 | 0 | MIT | 2018-03-09T21:53:44 | 2018-03-09T21:53:44 | null | UTF-8 | Python | false | false | 2,444 | py | from __future__ import absolute_import, print_function, division
from petl.transform.basics import cut, cutout, movefield, cat, annex, \
addfield, addfieldusingcontext, addrownumbers, addcolumn, rowslice, head, \
tail, skipcomments, stack
from petl.transform.headers import rename, setheader, extendheader, \
pushheader, skip, prefixheader, suffixheader, sortheader
from petl.transform.conversions import convert, convertall, replace, \
replaceall, update, convertnumbers, format, formatall, interpolate, \
interpolateall
from petl.transform.sorts import sort, mergesort, issorted
from petl.transform.selects import select, selectop, selectcontains, \
selecteq, selectfalse, selectge, selectgt, selectin, selectis, \
selectisinstance, selectisnot, selectle, selectlt, selectne, selectnone, \
selectnotin, selectnotnone, selectrangeclosed, selectrangeopen, \
selectrangeopenleft, selectrangeopenright, selecttrue, \
selectusingcontext, rowlenselect, facet, biselect
from petl.transform.joins import join, leftjoin, rightjoin, outerjoin, \
crossjoin, antijoin, lookupjoin, unjoin
from petl.transform.hashjoins import hashjoin, hashleftjoin, hashrightjoin, \
hashantijoin, hashlookupjoin
from petl.transform.reductions import rowreduce, mergeduplicates,\
aggregate, groupcountdistinctvalues, groupselectfirst, groupselectmax, \
groupselectmin, merge, fold, Conflict, groupselectlast
from petl.transform.fills import filldown, fillright, fillleft
from petl.transform.regex import capture, split, search, searchcomplement, \
sub
from petl.transform.reshape import melt, recast, transpose, pivot, flatten, \
unflatten
from petl.transform.maps import fieldmap, rowmap, rowmapmany, rowgroupmap
from petl.transform.unpacks import unpack, unpackdict
from petl.transform.dedup import duplicates, unique, distinct, conflicts, \
isunique
from petl.transform.setops import complement, intersection, \
recordcomplement, diff, recorddiff, hashintersection, hashcomplement
from petl.transform.intervals import intervaljoin, intervalleftjoin, \
intervaljoinvalues, intervalantijoin, intervallookup, intervallookupone, \
intervalrecordlookup, intervalrecordlookupone, intervalsubtract, \
facetintervallookup, facetintervallookupone, facetintervalrecordlookup, \
facetintervalrecordlookupone, collapsedintervals
from petl.transform.validation import validate
| [
"[email protected]"
] | |
a39a00acac47914e717411524682266198077482 | 7fb51ae4163aeea47d0fb434f28666ea99b104af | /app.py | 2cb0275c32bef3070e1b21c6218a864f8431cfd1 | [] | no_license | knowsuchagency/cdk-hello-apigw-asgi | 153eaae8d01a14e5886315122613c462ea90de70 | a47cdc58ddd9bb070419d4fbcfa1cf07fb3873f9 | refs/heads/master | 2022-12-28T15:44:05.585842 | 2020-10-18T18:17:15 | 2020-10-18T18:17:15 | 301,259,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | #!/usr/bin/env python3
from aws_cdk import core
from hello_apig_wsgi.hello_apig_wsgi_stack import HelloApigWsgiStack
from hello_apig_wsgi.pipeline_stack import PipelineStack
from pydantic import BaseSettings
class Config(BaseSettings):
"""https://pydantic-docs.helpmanual.io/usage/settings/"""
account: str = "385504394431"
region: str = "us-east-2"
gh_username: str = "knowsuchagency"
gh_repo: str = "cdk-hello-apigw-asgi"
if __name__ == "__main__":
config = Config()
app = core.App()
application_stack = HelloApigWsgiStack(app, "application")
pipeline_stack = PipelineStack(
app,
"pipeline",
config,
env={"account": config.account, "region": config.region},
)
app.synth()
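# --- Editor's note (added): because Config subclasses pydantic's BaseSettings,
# every field can be overridden from the environment without code changes
# (pydantic v1 matches variable names case-insensitively by default), e.g.
# with hypothetical values:
#
#   ACCOUNT=123456789012 REGION=us-west-2 GH_USERNAME=someone cdk synth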
| [
"[email protected]"
] | |
31c03c46273a3ec99f7d4ec05e1b47a219fe961a | 291c08a11a29ce995099f775ac0ef79cd69dd1fc | /file_app/migrations/0001_initial.py | 3918065b948c8b8a81a7a5331b098db45406b028 | [
"MIT"
] | permissive | Amirsorouri00/neolej | 1e278a2216a961b8abedc32b30d4fccf5c431d0b | 8fa18f2c1a38b0a59ed7eeeed7ed37ef7b9dad97 | refs/heads/master | 2020-04-20T15:36:24.669991 | 2019-03-17T07:20:02 | 2019-03-17T07:20:02 | 168,935,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # Generated by Django 2.1.3 on 2019-02-16 15:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('remark', models.CharField(max_length=31)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"[email protected]"
] | |
95ee6d9028cb4c1c7c5a614b96db2580eee8344c | e859d4604615e4ff3c6730554b12ae7b09e86286 | /django-stubs/db/models/fields/files.pyi | bb53d5944104eade0990047b3af0abafb3dbaff7 | [
"BSD-3-Clause"
] | permissive | microblag/django-stubs | d91655c346279424cf5e57b80a0b104dceb86ddc | d0eb05832551d344f06ec3e83cb850866a4d37c2 | refs/heads/master | 2020-04-18T05:18:24.887114 | 2019-02-06T04:02:28 | 2019-02-06T04:02:28 | 167,273,694 | 0 | 0 | null | 2019-01-24T00:12:42 | 2019-01-24T00:12:42 | null | UTF-8 | Python | false | false | 2,954 | pyi | from typing import Any, Callable, List, Optional, Type, Union
from django.core.checks.messages import Error
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import FileSystemStorage, Storage
from django.db.models.base import Model
from django.db.models.fields import Field
from django.forms import fields as form_fields
class FieldFile(File):
instance: Model = ...
field: FileField = ...
storage: FileSystemStorage = ...
def __init__(self, instance: Model, field: FileField, name: Optional[str]) -> None: ...
file: Any = ...
@property
def path(self) -> str: ...
@property
def url(self) -> str: ...
@property
def size(self) -> int: ...
def save(self, name: str, content: File, save: bool = ...) -> None: ...
def delete(self, save: bool = ...) -> None: ...
@property
def closed(self) -> bool: ...
class FileDescriptor:
field: FileField = ...
def __init__(self, field: FileField) -> None: ...
def __get__(self, instance: Optional[Model], cls: Type[Model] = ...) -> Union[FieldFile, FileDescriptor]: ...
def __set__(self, instance: Model, value: Optional[Any]) -> None: ...
class FileField(Field):
attr_class: Any = ...
descriptor_class: Any = ...
description: Any = ...
storage: Any = ...
upload_to: Any = ...
def __init__(
self,
verbose_name: Optional[str] = ...,
name: Optional[str] = ...,
upload_to: Union[Callable, str] = ...,
storage: Optional[Storage] = ...,
**kwargs: Any
) -> None: ...
def check(self, **kwargs: Any) -> List[Error]: ...
def deconstruct(self) -> Any: ...
def get_internal_type(self) -> str: ...
def get_prep_value(self, value: Union[FieldFile, str]) -> str: ...
def pre_save(self, model_instance: Model, add: bool) -> FieldFile: ...
def generate_filename(self, instance: Optional[Model], filename: str) -> str: ...
def save_form_data(self, instance: Model, data: Optional[Union[bool, File, str]]) -> None: ...
def formfield(self, **kwargs: Any) -> form_fields.FileField: ...
class ImageFileDescriptor(FileDescriptor):
field: ImageField
def __set__(self, instance: Model, value: Optional[str]) -> None: ...
class ImageFieldFile(ImageFile, FieldFile):
field: ImageField
def delete(self, save: bool = ...) -> None: ...
class ImageField(FileField):
def __init__(
self,
verbose_name: Optional[str] = ...,
name: Optional[str] = ...,
width_field: Optional[str] = ...,
height_field: Optional[str] = ...,
**kwargs: Any
) -> None: ...
def check(self, **kwargs: Any) -> List[Any]: ...
def deconstruct(self) -> Any: ...
def update_dimension_fields(self, instance: Model, force: bool = ..., *args: Any, **kwargs: Any) -> None: ...
def formfield(self, **kwargs: Any) -> form_fields.ImageField: ...
| [
"[email protected]"
] | |
93445be0fe7f2304b57849fd393fb87152e4fed1 | 95230c76a9e09d518c125ea8105002a7af6d1afc | /05_qstyle_sheets/style_sheets_example.py | 48aab6b37dbed01f2b7497c75912ca16b2631c56 | [] | no_license | amkartheek/nuke_python | d5f86f5ccb9742cd65acaf571fd4f5c7ca4032ff | 67ed5e25796506c9321f487f576bc142842e0041 | refs/heads/master | 2020-05-31T19:04:19.463232 | 2018-03-09T19:17:19 | 2018-03-09T19:17:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | from PySide.QtGui import *
from PySide.QtCore import *
import sys
class MyLineEdit(QLineEdit):
def __init__(self):
super(MyLineEdit, self).__init__()
class Panel(QWidget):
def __init__(self):
super(Panel, self).__init__()
first_name_label = QLabel("First Name:")
self.first_name = QLineEdit()
self.first_name.setProperty("valid", False)
self.first_name.setObjectName("first_name")
last_name_label = QLabel("Last Name:")
last_name = QLineEdit()
name_layout = QHBoxLayout()
name_layout.addWidget(first_name_label)
name_layout.addWidget(self.first_name)
name_layout.addWidget(last_name_label)
name_layout.addWidget(last_name)
role_label = QLabel("Role")
role_combobox = QComboBox()
role_combobox.addItems(["Pipeline TD", "Compositor", "FX TD", "Modeler", "Animator", "Lighting TD"])
role_layout = QHBoxLayout()
role_layout.addWidget(role_label)
role_layout.addWidget(role_combobox)
role_layout.addStretch()
self.gender_male_checkbox = QCheckBox("male")
        self.gender_female_checkbox = QCheckBox("female")
gender_layout = QHBoxLayout()
gender_layout.addWidget(self.gender_male_checkbox)
        gender_layout.addWidget(self.gender_female_checkbox)
gender_layout.addStretch()
list_widget = QListWidget()
list_widget.addItems(["Canada", "USA", "Japan", "London", "Australia"])
# list_widget.setAlternatingRowColors(True)
save_push_button = QPushButton("OK")
        close_push_button = QPushButton("Close")
action_layout = QHBoxLayout()
action_layout.addWidget(save_push_button)
        action_layout.addWidget(close_push_button)
master_layout = QVBoxLayout()
master_layout.addLayout(name_layout)
master_layout.addLayout(role_layout)
master_layout.addLayout(gender_layout)
master_layout.addWidget(list_widget)
master_layout.addLayout(action_layout)
self.setLayout(master_layout)
# Signals
        close_push_button.clicked.connect(self.close)
save_push_button.clicked.connect(self.show_message_box)
self.gender_male_checkbox.clicked.connect(self.set_checkbox)
        self.gender_female_checkbox.clicked.connect(self.set_checkbox)
self.first_name.textChanged.connect(self.check_validity)
self.set_style_sheet()
def check_validity(self, text):
self.first_name.setProperty("valid", bool(text))
self.set_style_sheet()
def set_style_sheet(self):
text = open("style.txt").read()
self.setStyleSheet(text)
def set_checkbox(self):
        self.gender_female_checkbox.setChecked(self.sender() is self.gender_female_checkbox)
self.gender_male_checkbox.setChecked(self.sender() is self.gender_male_checkbox)
def show_message_box(self):
QMessageBox.information(self, "information", "User saved successfully!")
app = QApplication(sys.argv)
panel = Panel()
panel.show()
app.exec_()
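# --- Editor's note (added): the stylesheet file "style.txt" read by
# set_style_sheet() is not included in this snippet. Qt Style Sheets support
# property selectors, so hypothetical contents that react to the custom
# "valid" property set above might look like:
#
#   QLineEdit[valid="false"] { border: 1px solid red; }
#   QLineEdit[valid="true"]  { border: 1px solid green; }
#   QListWidget { alternate-background-color: #f0f0f0; }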
| [
"[email protected]"
] | |
bc72cc0f0343ca37bc40790a466c5e2c0b09be43 | 2f46c6463d4f871a72d4296c3dae00f029e892f1 | /src/cogent3/maths/stats/jackknife.py | 33192edc584ffa4dc6506935473a1e778893a7bd | [
"BSD-3-Clause"
] | permissive | BrendanBeaton/cogent3 | a09376c55f24da837690219157770ad94e917579 | e10f4f933921d52b000096b7c016190a1602add6 | refs/heads/master | 2022-12-02T07:59:11.112306 | 2020-06-30T05:40:33 | 2020-06-30T05:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,782 | py | import numpy as np
from cogent3.util.table import Table
__author__ = "Anuj Pahwa, Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Anuj Pahwa", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.6.30a"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Production"
def index_gen(length):
data = tuple(range(length))
def gen(i):
temp = list(data)
temp.pop(i)
return temp
return gen
class JackknifeStats(object):
"""Computes the jackknife statistic for a particular statistical function
    as outlined in 'Tukey's Jackknife Method' (Biometry, Sokal & Rohlf)."""
def __init__(self, length, calc_stat, gen_index=index_gen):
"""
Parameters
----------
length : int
The length of the data set (since data is not passed to this class).
calc_stat : callable
A callback function that computes the required statistic of a defined dataset.
gen_index
A callback function that generates a list of indices that are used to sub-sample the dataset.
"""
super(JackknifeStats, self).__init__()
self.n = length
self.calc_stat = calc_stat
self.gen_index = gen_index(self.n)
self._subset_statistics = None
self._pseudovalues = None
self._jackknifed_stat = None
self._sample_statistic = None
self._standard_error = None
def jackknife(self):
"""Computes the jackknife statistics and standard error"""
n = self.n
n_minus_1 = n - 1
# compute the statistic in question on the whole data set
self._sample_statistic = self.calc_stat(list(range(self.n)))
n_sample_statistic = n * self._sample_statistic
# compute the jackknife statistic for the data by removing an element
# in each iteration and computing the statistic.
subset_statistics = []
pseudovalues = []
for index in range(self.n):
stat = self.calc_stat(self.gen_index(index))
subset_statistics.append(stat)
pseudovalue = n_sample_statistic - n_minus_1 * stat
pseudovalues.append(pseudovalue)
self._pseudovalues = np.array(pseudovalues)
self._subset_statistics = np.array(subset_statistics)
self._jackknifed_stat = self._pseudovalues.mean(axis=0)
# Compute the approximate standard error of the jackknifed estimate
# of the statistic
variance = np.square(self._pseudovalues - self._jackknifed_stat).sum(axis=0)
variance_norm = np.divide(variance, n * n_minus_1)
self._standard_error = np.sqrt(variance_norm)
@property
def sample_stat(self):
if self._sample_statistic is None:
self.jackknife()
return self._sample_statistic
@property
def jackknifed_stat(self):
if self._jackknifed_stat is None:
self.jackknife()
return self._jackknifed_stat
@property
def standard_error(self):
if self._standard_error is None:
self.jackknife()
return self._standard_error
@property
def sub_sample_stats(self):
"""Return a table of the sub-sample statistics"""
# if the statistics haven't been run yet.
if self._subset_statistics is None:
self.jackknife()
# generate table
title = "Subsample Stats"
rows = []
for index in range(self.n):
row = [index]
subset_statistics = self._subset_statistics[index]
try:
for value in subset_statistics:
row.append(value)
except TypeError:
row.append(subset_statistics)
rows.append(row)
header = ["i"]
subset_stats = self._subset_statistics[0]
try:
num_datasets = len(subset_stats)
for i in range(num_datasets):
header.append("Stat_%s-i" % i)
except TypeError:
header.append("Stat-i")
return Table(data=rows, header=header, title=title)
@property
def pseudovalues(self):
"""Return a table of the Pseudovalues"""
# if the statistics haven't been run yet.
if self._pseudovalues is None:
self.jackknife()
# detailed table
title = "Pseudovalues"
rows = []
for index in range(self.n):
row = [index]
pseudovalues = self._pseudovalues[index]
try:
for value in pseudovalues:
row.append(value)
except TypeError:
row.append(pseudovalues)
rows.append(row)
header = ["i"]
pseudovalues = self._pseudovalues[0]
try:
num_datasets = len(pseudovalues)
for i in range(num_datasets):
header.append("Pseudovalue_%s-i" % i)
except TypeError:
header.append("Pseudovalue-i")
return Table(data=rows, header=header, title=title)
@property
def summary_stats(self):
"""Return a summary table with the statistic value(s) calculated for the
        full data-set, the jackknife statistics and standard errors."""
# if the statistics haven't been run yet.
if self._jackknifed_stat is None:
self.jackknife()
header = ["Sample Stat", "Jackknife Stat", "Standard Error"]
title = "Summary Statistics"
rows = np.vstack(
(self._sample_statistic, self._jackknifed_stat, self._standard_error)
)
rows = rows.transpose()
return Table(header=header, data=rows, title=title)
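# --- Editor's sketch (added): minimal usage. calc_stat receives a list of
# indices into your data, so closing over the data array is the intended
# pattern:
#
#   import numpy as np
#   data = np.array([2.0, 4.0, 6.0, 8.0])
#   jk = JackknifeStats(length=len(data),
#                       calc_stat=lambda idx: data[idx].mean())
#   jk.jackknife()
#   print(jk.sample_stat, jk.jackknifed_stat, jk.standard_error)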
| [
"[email protected]"
] | |
f15ea5350f91db08607111b1b3da17afdb7e9df0 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /compositional_rl/gwob/examples/web_environment_example.py | db65accda519a7ce01ec591613e7c7d0385b57be | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 6,400 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example execution of a rule-based optimal policy on gminiwob shopping."""
import time
from absl import app
from absl import flags
from absl import logging
from CoDE import test_websites
from CoDE import utils
from CoDE import vocabulary_node
from CoDE import web_environment
flags.DEFINE_string("data_dep_path", None,
"Data dep path for local miniwob files.")
flags.DEFINE_boolean(
"run_headless_mode", False,
"Run in headless mode. On borg, this should always be true.")
flags.DEFINE_boolean(
"use_conceptual", False,
"If true, use abstract web navigation where it is assumed to known which profile field corresponds to which element."
)
FLAGS = flags.FLAGS
def run_policy_on_shopping_website():
"""Run an optimal policy on the shopping website and visualize in browser."""
# Create a generic web environment to which we will add primitives and
# transitions to create a shopping website. These parameters will work to
# observe a simple policy running but they might be insufficient in a training
# setting as observations will be converted into arrays and these parameters
# are used to shape them. In this example, they don't have that effect.
env = web_environment.GMiniWoBWebEnvironment(
base_url="file://{}/".format(FLAGS.data_dep_path),
subdomain="gminiwob.generic_website",
profile_length=5,
number_of_fields=5,
use_only_profile_key=False,
number_of_dom_elements=150,
dom_attribute_sequence_length=5,
keyboard_action_size=5,
kwargs_dict={
"headless": FLAGS.run_headless_mode,
"threading": False
},
step_limit=25,
global_vocabulary=vocabulary_node.LockedVocabulary(),
use_conceptual=FLAGS.use_conceptual)
# Create a shopping website design with difficulty = 3.
website = test_websites.create_shopping_website(3)
design = test_websites.generate_website_design_from_created_website(
website)
# Design the actual environment.
env.design_environment(
design, auto_num_pages=True)
# Make sure raw_state=True as this will return raw observations not numpy
# arrays.
state = env.reset(raw_state=True)
# Optimal sequences of elements to visit. Some might be redundant and will be
# skipped.
optimal_actions = [
"group_next_p0",
"group_username",
"group_password",
"group_rememberme",
"group_captcha",
"group_stayloggedin",
"group_next_p1",
"group_next_p2",
"group_name_first",
"group_name_last",
"group_address_line1",
"group_address_line2",
"group_city",
"group_postal_code",
"group_state",
"group_submit_p2",
]
# Corresponding pages of these elements:
# [0, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3]
reward = 0.0
logging.info("Utterance: %s", str(state.utterance))
logging.info("\n\n")
logging.info("All available primitives: %s",
str(env.get_all_actionable_primitives()))
logging.info("\n\n")
# Iterate over all optimal actions. For each action, iterate over all elements
# in the current observation. If an element matches, execute the optimal
# action and continue.
# Iterate over optimal actions.
for action in optimal_actions:
logging.info("Element at focus: %s", str(action))
# Iterate over all elements in the current observation.
# order_dom_elements returns an ordered list of DOM elements to make the
# order and elements consistent.
for i, element in enumerate(
utils.order_dom_elements(state.dom_elements, html_id_prefix=None)):
# If HTML if of the element matches the action, execute the action.
if element.id == action.replace("group", "actionable"):
logging.info("Acting on (%s)", str(element))
logging.info("\tAttributes of the element: %s",
str(utils.dom_attributes(element, 5)))
# Get the corresponding profile fields.
profile_keys = env.raw_profile.keys
# Execute the (element index, profile field index) action on the
# website. Environment step function accepts a single scalar action.
# We flatten the action from a tuple to a scalar which is deflattened
# back to a tuple in the step function.
if action[len("group") +
1:] in profile_keys and not FLAGS.use_conceptual:
logging.info("Profile: %s, Element ID: %s",
str(profile_keys.index(action[len("group") + 1:])),
str(action[len("group") + 1:]))
# action=element_index + profile_field_index * number_of_elements
# This is converted back into a tuple using a simple modulo
# arithmetic.
state, r, _, _ = env.step(
i + profile_keys.index(action[len("group") + 1:]) *
env.number_of_dom_elements, True)
else: # This is the case where we have abstract navigation problem.
logging.info("Element ID: %s", str(action[len("group") + 1:]))
# We don't need to convert a tuple into a scalar because in this case
# the environment expects the index of the element.
state, r, _, _ = env.step(i, True)
logging.info("Current reward: %f", r)
reward += r
if not FLAGS.run_headless_mode:
# wait 1 sec so that the action can be observed on the browser.
time.sleep(1)
break
logging.info("Final reward: %f", reward)
if not FLAGS.run_headless_mode:
# wait 30 secs so that the users can inspect the html in the browser.
time.sleep(30)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
run_policy_on_shopping_website()
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
] | |
3523fe1ae052b3f169f7bc74db4e83be9b2377c2 | 40afc1f3790099d2d5270503d101f30c71a89f07 | /usersys/views/user.py | d4c9af3172aaa675d041cfa02bcb920867dd7649 | [] | no_license | fhydralisk/reviewing | a3d31af1e8fe8caf2e831b35816d638ac0cadcce | 7a27f278f85f9fdbcc805b0290f6bbdbb7147609 | refs/heads/master | 2020-05-14T23:27:37.229343 | 2019-05-07T12:28:21 | 2019-05-07T12:28:21 | 181,997,119 | 0 | 2 | null | 2019-05-07T07:38:14 | 2019-04-18T01:49:53 | Python | UTF-8 | Python | false | false | 431 | py | from base.views import WLAPIGenericView
from ..serializers import user as user_serializers
from ..funcs import user as user_funcs
class UserView(WLAPIGenericView):
http_method_names = ['get', 'patch', 'options']
API_SERIALIZER = {
'patch': user_serializers.UserPartialUpdateSerializer
}
RESULT_SERIALIZER = {
'get': user_serializers.UserDetailSerializer
}
FUNC_CLASS = user_funcs.UserFunc
| [
"[email protected]"
] | |
62ab32f13bfb48de1118f28c062ed0d2f5702325 | 6e5c83baa19e09bcc59300d764ce936f8cbe6b5b | /pybtex/style/names/plain.py | 62c0c2ca311b0e086a1a078c4410d14d84d02f38 | [
"MIT"
] | permissive | rybesh/pybtex | 84e10b12f6c9ade0de2af638bfc23945109eff6d | 18e0b5336f07ebc5dc97aa899362fb292ea7bb5a | refs/heads/master | 2016-08-07T20:15:26.865726 | 2011-03-18T18:03:48 | 2011-03-18T18:03:48 | 1,246,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,441 | py | # Copyright (c) 2010, 2011 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pybtex.style.template import join
from pybtex.style.names import BaseNameStyle, name_part
class NameStyle(BaseNameStyle):
name = 'plain'
def format(self, person, abbr=False):
r"""
Format names similarly to {ff~}{vv~}{ll}{, jj} in BibTeX.
>>> from pybtex.core import Person
>>> name = Person(string=r"Charles Louis Xavier Joseph de la Vall{\'e}e Poussin")
>>> plain = NameStyle().format
>>> print plain(name).format().plaintext()
Charles Louis Xavier<nbsp>Joseph de<nbsp>la Vall{\'e}e<nbsp>Poussin
>>> print plain(name, abbr=True).format().plaintext()
C.<nbsp>L. X.<nbsp>J. de<nbsp>la Vall{\'e}e<nbsp>Poussin
>>> name = Person(first='First', last='Last', middle='Middle')
>>> print plain(name).format().plaintext()
First<nbsp>Middle Last
>>> print plain(name, abbr=True).format().plaintext()
F.<nbsp>M. Last
>>> print plain(Person('de Last, Jr., First Middle')).format().plaintext()
First<nbsp>Middle de<nbsp>Last, Jr.
"""
return join [
name_part(tie=True) [person.first(abbr) + person.middle(abbr)],
name_part(tie=True) [person.prelast()],
name_part [person.last()],
name_part(before=', ') [person.lineage()]
]
| [
"[email protected]"
] | |
712c8911fb30a81f68341c8d02607fc01373169c | bc2effb57e82128b81371fb03547689255d5ef15 | /백준/그래프/13549(숨바꼭질 3).py | 3e27f94ac43b4efa403bf096775a59d3e8e538cd | [] | no_license | CharmingCheol/python-algorithm | 393fa3a8921f76d25e0d3f02402eae529cc283ad | 61c8cddb72ab3b1fba84171e03f3a36f8c672648 | refs/heads/master | 2023-03-01T11:00:52.801945 | 2021-01-31T13:38:29 | 2021-01-31T13:38:29 | 229,561,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import sys
from collections import deque
MAX_SIZE = 100001
start, end = map(int, sys.stdin.readline().split())
board = [float("inf")] * MAX_SIZE
board[start] = 0
queue = deque()
queue.append((start, 0))
while queue:
now, value = queue.popleft()
if now == end:
print(board[now])
break
if value != board[now]: continue
if 0 <= now - 1 and value + 1 < board[now - 1]:
board[now - 1] = value + 1
queue.append((now - 1, value + 1))
if now + 1 < MAX_SIZE and value + 1 < board[now + 1]:
board[now + 1] = value + 1
queue.append((now + 1, value + 1))
if now * 2 < MAX_SIZE and value < board[now * 2]:
board[now * 2] = value
queue.append((now * 2, value))
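# --- Editor's note (added): moving to now-1/now+1 costs 1 second while the
# teleport to now*2 costs 0, so this is a shortest path over 0/1 edge weights.
# The lazy check `if value != board[now]: continue` keeps the plain FIFO queue
# correct; a common alternative is 0-1 BFS, pushing zero-cost moves to the
# front of the deque instead:
#
#   queue.appendleft((now * 2, value))   # cost-0 edge jumps the queue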
| [
"[email protected]"
] | |
86e497f7d8b7f8e601d5bdf3d3d634b51fbc04bf | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/hair_tool/curves_resample.py | bbf794543f831be09e4c96a6a4ed9485f74a8093 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,061 | py | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) 2017 JOSECONSCO
# Created by JOSECONSCO
import bpy
import math
import numpy as np
from bpy.props import EnumProperty, FloatProperty, BoolProperty, IntProperty, StringProperty
from .resample2d import interpol_Catmull_Rom, get_strand_proportions
class HT_OT_CurvesResample(bpy.types.Operator):
bl_label = "Curve resample"
bl_idname = "object.curve_resample"
bl_description = "Change ammount of points on curve"
bl_options = {"REGISTER", "UNDO"}
hairType: bpy.props.EnumProperty(name="Output Curve Type", default="NURBS",
items=(("BEZIER", "Bezier", ""),
("NURBS", "Nurbs", ""),
("POLY", "Poly", "")))
# bezierRes: IntProperty(name="Bezier resolution", default=3, min=1, max=12)
t_in_y: IntProperty(name="Strand Segments", default=8, min=3, max=20)
    uniformPointSpacing: BoolProperty(name="Uniform spacing", description="Distribute strand points with uniform spacing", default=False)
    equalPointCount: BoolProperty(name="Equal point count", description="Give all curves the same point count \n"
                                  "If disabled, shorter curves will have fewer points", default=False)
onlySelection: BoolProperty(name="Only Selected", description="Affect only selected points", default=False)
def invoke(self, context, event):
particleObj = context.active_object
if particleObj.mode == 'EDIT':
self.onlySelection = True
elif particleObj.mode == 'OBJECT':
self.onlySelection = False
Curve = context.active_object
if not Curve.type == 'CURVE':
self.report({'INFO'}, 'Use operator on curve type object')
return {"CANCELLED"}
self.input_spline_type = Curve.data.splines[0].type
self.hairType = self.input_spline_type # hair type - output spline
if self.input_spline_type == 'NURBS':
self.nurbs_order = Curve.data.splines[0].order_u
        if len(Curve.data.splines) > 0:  # get the initial value for resampling t
polyline = Curve.data.splines[0] # take first spline len for resampling
if polyline.type == 'NURBS' or polyline.type == 'POLY':
self.t_in_y = len(polyline.points)
else:
self.t_in_y = len(polyline.bezier_points)
self.bezierRes = Curve.data.resolution_u
return self.execute(context)
def execute(self, context):
curveObj = context.active_object
if curveObj.type != 'CURVE':
self.report({'INFO'}, 'Works only on curves')
return {"CANCELLED"}
pointsList = []
pointsRadius = []
pointsTilt = []
selectedSplines = []
if self.onlySelection:
for polyline in curveObj.data.splines:
if polyline.type == 'NURBS' or polyline.type == 'POLY':
if any(point.select == True for point in polyline.points):
selectedSplines.append(polyline)
else:
if any(point.select_control_point == True for point in polyline.bezier_points):
selectedSplines.append(polyline)
if not selectedSplines:
selectedSplines = curveObj.data.splines
else:
selectedSplines = curveObj.data.splines
for polyline in selectedSplines: # for strand point
if polyline.type == 'NURBS' or polyline.type == 'POLY':
points = polyline.points
else:
points = polyline.bezier_points
if len(points) > 1: # skip single points
pointsList.append([point.co.to_3d() for point in points])
pointsRadius.append([point.radius for point in points])
pointsTilt.append([point.tilt for point in points])
backup_mat_indices = [spline.material_index for spline in selectedSplines]
interpolRad = []
interpolTilt = []
splinePointsList = interpol_Catmull_Rom(pointsList, self.t_in_y, uniform_spacing = self.uniformPointSpacing, same_point_count=self.equalPointCount)
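        # Resample each strand's per-point radius and tilt with np.interp so they track the new point counts.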
if self.equalPointCount: # each output spline will have same point count
t_ins_y = [i / (self.t_in_y - 1) for i in range(self.t_in_y)]
for radii, tilts in zip(pointsRadius, pointsTilt): # per strand
t_rad = [i / (len(radii) - 1) for i in range(len(radii))]
interpolRad.append(np.interp(t_ins_y, t_rad, radii)) # first arg len() = out len
interpolTilt.append(np.interp(t_ins_y, t_rad, tilts)) # first arg len() = out len
else: # shorter output splines will have less points
lens = [len(x) for x in splinePointsList]
for radii, tilts, strandLen in zip(pointsRadius, pointsTilt, lens): # per strand
t_ins_Normalized = [i / (strandLen - 1) for i in range(strandLen)]
                t_rad = [i / (len(radii) - 1) for i in range(len(radii))]
                interpolRad.append(np.interp(t_ins_Normalized, t_rad, radii))  # first arg len() = out len
                interpolTilt.append(np.interp(t_ins_Normalized, t_rad, tilts))  # first arg len() = out len
curveData = curveObj.data
# spline_type =
if self.onlySelection:
for spline in selectedSplines:
curveData.splines.remove(spline)
else:
curveData.splines.clear()
newSplines = []
for k, splinePoints in enumerate(splinePointsList): # for each strand/ring
curveLenght = len(splinePoints)
polyline = curveData.splines.new(self.hairType)
newSplines.append(polyline)
if self.hairType == 'BEZIER':
polyline.bezier_points.add(curveLenght - 1)
elif self.hairType == 'POLY' or self.hairType == 'NURBS':
polyline.points.add(curveLenght - 1)
if self.hairType == 'NURBS':
polyline.order_u = self.nurbs_order if self.input_spline_type == 'NURBS' else 3
polyline.use_endpoint_u = True
np_splinePointsOnes = np.ones((len(splinePoints), 4)) # 4 coord x,y,z ,1
np_splinePointsOnes[:, :3] = splinePoints
            if self.hairType == 'BEZIER':
                # foreach_set needs a flat, contiguous buffer; the 2D slice is flattened first
                polyline.bezier_points.foreach_set('co', np_splinePointsOnes[:, :3].ravel())
                polyline.bezier_points.foreach_set('radius', interpolRad[k])
                polyline.bezier_points.foreach_set('tilt', interpolTilt[k])
                # enum properties cannot be batch-assigned from a single string, so set them per point
                for bezier_point in polyline.bezier_points:
                    bezier_point.handle_left_type = 'AUTO'
                    bezier_point.handle_right_type = 'AUTO'
else:
polyline.points.foreach_set('co', np_splinePointsOnes.ravel())
polyline.points.foreach_set('radius', interpolRad[k])
polyline.points.foreach_set('tilt', interpolTilt[k])
curveData.resolution_u = self.bezierRes
# bpy.ops.object.curve_uv_refresh()
for backup_mat, newSpline in zip(backup_mat_indices, newSplines):
newSpline.material_index = backup_mat
return {"FINISHED"}
| [
"[email protected]"
] | |
7e33879f634aa7e8d75988cebf28a1a0a95922cf | 9918208c80a3c396d8a1e13783d501d60dbc2050 | /digitalearthau/index.py | 184f71b63443c944423a74ab43f21a32af6c40c5 | [] | no_license | benjimin/digitalearthau | 2d3010be76fad0d0b6b4854dbbad07e98254b239 | 5098bf3c88627cad78a8caa5ab703c586c17a6f7 | refs/heads/develop | 2022-02-27T07:36:16.009689 | 2017-09-14T05:51:27 | 2017-09-14T05:51:27 | 103,460,937 | 0 | 0 | null | 2017-09-13T23:10:15 | 2017-09-13T23:10:15 | null | UTF-8 | Python | false | false | 7,353 | py | import collections
import uuid
from datetime import datetime
from typing import Iterable, Optional, Mapping, List
from datacube.index import index_connect
from datacube.index._api import Index
from datacube.model import Dataset
from datacube.scripts import dataset as dataset_script
from datacube.utils import uri_to_local_path
from digitalearthau.utils import simple_object_repr
class DatasetLite:
"""
A small subset of datacube.model.Dataset.
A "real" dataset needs a lot of initialisation: types etc, so this is easier to test with.
We also, in this script, depend heavily on the __eq__ behaviour of this particular class (by id only), and subtle
bugs could occur if the core framework made changes to it.
"""
    def __init__(self, id_: uuid.UUID, archived_time: Optional[datetime] = None) -> None:
# Sanity check of the type, as our equality checks are quietly wrong if the types don't match,
# and we've previously had problems with libraries accidentally switching string/uuid types...
assert isinstance(id_, uuid.UUID)
self.id = id_
self.archived_time = archived_time
@property
def is_archived(self):
"""
Is this dataset archived?
(an archived dataset is one that is not intended to be used by users anymore: eg. it has been
replaced by another dataset. It will not show up in search results, but still exists in the
system via provenance chains or through id lookup.)
:rtype: bool
"""
return self.archived_time is not None
def __eq__(self, other):
if not other:
return False
return self.id == other.id
def __hash__(self):
return hash(self.id)
@classmethod
def from_agdc(cls, dataset: Dataset):
return DatasetLite(dataset.id, archived_time=dataset.archived_time)
def __repr__(self):
return simple_object_repr(self)
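# A hypothetical sketch of the id-only equality described in the docstring:
#   some_id = uuid.uuid4()
#   DatasetLite(some_id) == DatasetLite(some_id, archived_time=datetime.now())  # -> True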
class DatasetPathIndex:
"""
An index of datasets and their URIs.
This is a slightly questionable attempt to make testing/mocking simpler.
There's two implementations: One in-memory and one that uses a real datacube.
(MemoryDatasetPathIndex and AgdcDatasetPathIndex)
"""
def iter_all_uris(self, query: dict) -> Iterable[str]:
raise NotImplementedError
def get_datasets_for_uri(self, uri: str) -> Iterable[DatasetLite]:
raise NotImplementedError
def get(self, dataset_id: uuid.UUID) -> Optional[DatasetLite]:
raise NotImplementedError
def add_location(self, dataset: DatasetLite, uri: str) -> bool:
raise NotImplementedError
def remove_location(self, dataset: DatasetLite, uri: str) -> bool:
raise NotImplementedError
def add_dataset(self, dataset: DatasetLite, uri: str):
raise NotImplementedError
def as_map(self) -> Mapping[DatasetLite, Iterable[str]]:
"""Map of all datasets to their uri list. Convenience method for tests"""
raise NotImplementedError
def close(self):
"""Do any clean-up as needed before forking."""
# Default implementation: no-op
pass
class AgdcDatasetPathIndex(DatasetPathIndex):
def __init__(self, index: Index) -> None:
super().__init__()
self._index = index
self._rules = dataset_script.load_rules_from_types(self._index)
def iter_all_uris(self, query: dict) -> Iterable[str]:
for uri, in self._index.datasets.search_returning(['uri'], **query):
yield str(uri)
@classmethod
def connect(cls) -> 'AgdcDatasetPathIndex':
return cls(index_connect(application_name='digitalearthau-pathsync'))
def get_datasets_for_uri(self, uri: str) -> Iterable[DatasetLite]:
for d in self._index.datasets.get_datasets_for_location(uri=uri):
yield DatasetLite.from_agdc(d)
def remove_location(self, dataset: DatasetLite, uri: str) -> bool:
was_removed = self._index.datasets.remove_location(dataset.id, uri)
return was_removed
def get(self, dataset_id: uuid.UUID) -> Optional[DatasetLite]:
agdc_dataset = self._index.datasets.get(dataset_id)
return DatasetLite.from_agdc(agdc_dataset) if agdc_dataset else None
def add_location(self, dataset: DatasetLite, uri: str) -> bool:
was_removed = self._index.datasets.add_location(dataset.id, uri)
return was_removed
def add_dataset(self, dataset: DatasetLite, uri: str):
path = uri_to_local_path(uri)
for d in dataset_script.load_datasets([path], self._rules):
if d.id == dataset.id:
self._index.datasets.add(d, sources_policy='ensure')
break
else:
raise RuntimeError('Dataset not found at path: %s, %s' % (dataset.id, uri))
def close(self):
self._index.close()
def as_map(self) -> Mapping[DatasetLite, Iterable[str]]:
"""
All contained (dataset, [location]) values, to check test results.
"""
return dict(
(
DatasetLite(dataset.id),
tuple(dataset.uris)
)
for dataset in self._index.datasets.search()
)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
class MemoryDatasetPathIndex(DatasetPathIndex):
"""
An in-memory implementation, so that we can test without using a real datacube index.
"""
def get(self, dataset_id: uuid.UUID) -> Optional[DatasetLite]:
for d in self._records.keys():
if d.id == dataset_id:
return d
return None
def __init__(self):
super().__init__()
# Map of dataset to locations.
self._records = collections.defaultdict(list) # type: Mapping[DatasetLite, List[str]]
def reset(self):
self._records = collections.defaultdict(list)
def iter_all_uris(self, query: dict) -> Iterable[str]:
for uris in self._records.values():
yield from uris
def add_location(self, dataset: DatasetLite, uri: str) -> bool:
if dataset not in self._records:
raise ValueError("Unknown dataset {} -> {}".format(dataset.id, uri))
return self._add(dataset, uri)
def _add(self, dataset_id, uri):
if uri in self._records[dataset_id]:
# Not added
return False
self._records[dataset_id].append(uri)
return True
def remove_location(self, dataset: DatasetLite, uri: str) -> bool:
if uri not in self._records[dataset]:
# Not removed
return False
# We never remove the dataset key, only the uris.
self._records[dataset].remove(uri)
return True
def get_datasets_for_uri(self, uri: str) -> Iterable[DatasetLite]:
for dataset, uris in self._records.items():
if uri in uris:
yield dataset
def as_map(self) -> Mapping[DatasetLite, Iterable[str]]:
"""
All contained (dataset, [location]) values, to check test results.
"""
return {id_: tuple(uris) for id_, uris in self._records.items()}
def add_dataset(self, dataset: DatasetLite, uri: str):
# We're not actually storing datasets...
return self._add(dataset, uri)
| [
"[email protected]"
] | |
15b6ae2d70b9799cb8748159e727ba2aff01ca67 | a7b4bd1db26f71ab941076691d894583e167a3fd | /tools/cli_auto_doc.py | 3fa4e46f23cc9b1663fdece8826ea5510b80263b | [
"Apache-2.0"
] | permissive | Mirantis/stackalytics | c422ccb27baa3f1fd7e68b9732ba0203144a3657 | 96ec7c6c630a9f2532b808069e045d434bbac200 | refs/heads/master | 2021-01-18T21:58:38.904481 | 2017-01-25T11:14:12 | 2017-01-25T11:14:12 | 10,863,780 | 3 | 4 | Apache-2.0 | 2020-02-26T11:45:53 | 2013-06-22T11:17:28 | Python | UTF-8 | Python | false | false | 1,806 | py | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
try:
import ConfigParser as configparser
except ImportError:
import configparser
def split_multiline(value):
value = [element for element in
(line.strip() for line in value.split('\n'))
if element]
return value
def get_entry_points(config):
if 'entry_points' not in config:
return {}
return dict((option, split_multiline(value))
for option, value in config['entry_points'].items())
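# Read setup.cfg, collect every declared console script, and dump each tool's
# --help output into <dest>/<tool>.txt.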
def make(cfg, dest):
parser = configparser.RawConfigParser()
parser.read(cfg)
config = {}
for section in parser.sections():
config[section] = dict(parser.items(section))
entry_points = get_entry_points(config)
console_scripts = entry_points.get('console_scripts')
if console_scripts:
for item in console_scripts:
tool = item.split('=')[0].strip()
print('Running %s' % tool)
os.system('%(tool)s --help > %(dest)s/%(tool)s.txt' %
dict(tool=tool, dest=dest))
if len(sys.argv) < 2:
print('Usage: cli_auto_doc <dest folder>')
sys.exit(1)
print('Generating docs from help to console tools')
make(cfg='setup.cfg', dest=sys.argv[1])
| [
"[email protected]"
] | |
58893a54c197fb68eeb0d035302bf64d8d6e53e9 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/gD3.py | aa7152104068969fce4fab0f59d40adbf339df10 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
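# Minimal line interpreter: every line must start with the keyword 'gD3' or
# 'ERROR' is printed and parsing stops; a payload wrapped in bare '"' tokens is echoed.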
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'gD3':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
1a8b3763c8a94e48cf8da659c686babc72716600 | 80abe7427ca501da06a9507cefa52d5c290f2833 | /Chapter04/topic_modeling.py | 841891d56168915143ec57282aeab11713c75372 | [] | no_license | CodedQuen/Raspberry-Pi-3-Cookbook-for-Python-Programmers | 7910c9cf9ebaf6f42510bd531bf965fd03e6efe8 | 4a77452c4510fd9c7da62099a93fdbc95a86245a | refs/heads/master | 2022-06-10T04:36:59.316284 | 2020-05-05T10:18:33 | 2020-05-05T10:18:33 | 261,421,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py |
from nltk.tokenize import RegexpTokenizer
from nltk.stem.snowball import SnowballStemmer
from gensim import models, corpora
from nltk.corpus import stopwords
# Load input words
def load_words(in_file):
element = []
with open(in_file, 'r') as f:
for line in f.readlines():
element.append(line[:-1])
return element
# Class to preprocess text
class Preprocedure(object):
# Initialize various operators
def __init__(self):
# Create a regular expression tokenizer
self.tokenizer = RegexpTokenizer(r'\w+')
# get the list of stop words
self.english_stop_words= stopwords.words('english')
# Create a Snowball stemmer
self.snowball_stemmer = SnowballStemmer('english')
# Tokenizing, stop word removal, and stemming
def procedure(self, in_data):
# Tokenize the string
token = self.tokenizer.tokenize(in_data.lower())
# Remove the stop words
tokenized_stopwords = [x for x in token if not x in self.english_stop_words]
# Perform stemming on the tokens
token_stemming = [self.snowball_stemmer.stem(x) for x in tokenized_stopwords]
return token_stemming
if __name__=='__main__':
# File containing linewise input data
in_file = 'data_topic_modeling.txt'
# Load words
element = load_words(in_file)
# Create a preprocedure object
preprocedure = Preprocedure()
# Create a list for processed documents
processed_tokens = [preprocedure.procedure(x) for x in element]
# Create a dictionary based on the tokenized documents
dict_tokens = corpora.Dictionary(processed_tokens)
corpus = [dict_tokens.doc2bow(text) for text in processed_tokens]
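    # doc2bow turns each token list into a sparse bag-of-words vector of (token id, count) pairs.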
# Generate the LDA model based on the corpus we just created
num_of_topics = 2
num_of_words = 4
ldamodel = models.ldamodel.LdaModel(corpus,
num_topics=num_of_topics, id2word=dict_tokens, passes=25)
print "Most contributing words to the topics:"
for item in ldamodel.print_topics(num_topics=num_of_topics, num_words=num_of_words):
print "\nTopic", item[0], "==>", item[1]
| [
"[email protected]"
] | |
25b980a0be5f061c6bdc488b9c6e51969e8a81c7 | ceb5b7c3882b2bf3f53219356e914462c680f059 | /azure-mgmt-compute/azure/mgmt/compute/containerservice/v2017_01_31/models/container_service_client_enums.py | 279f5dcb9d3ff37bd26b6e9a9c88b555f28c3dff | [
"MIT"
] | permissive | codalab/azure-sdk-for-python | b712da2a377cfa526e0ffa4fa40408e6a81e48e3 | f4c92d02d46fcdee9da430a18a394b108a2f8920 | refs/heads/master | 2021-01-19T14:40:23.567035 | 2017-04-11T22:49:13 | 2017-04-11T22:49:13 | 88,180,409 | 1 | 0 | null | 2017-04-13T15:36:45 | 2017-04-13T15:36:44 | null | UTF-8 | Python | false | false | 2,291 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ContainerServiceOchestratorTypes(Enum):
swarm = "Swarm"
dcos = "DCOS"
custom = "Custom"
kubernetes = "Kubernetes"
class ContainerServiceVMSizeTypes(Enum):
standard_a0 = "Standard_A0"
standard_a1 = "Standard_A1"
standard_a2 = "Standard_A2"
standard_a3 = "Standard_A3"
standard_a4 = "Standard_A4"
standard_a5 = "Standard_A5"
standard_a6 = "Standard_A6"
standard_a7 = "Standard_A7"
standard_a8 = "Standard_A8"
standard_a9 = "Standard_A9"
standard_a10 = "Standard_A10"
standard_a11 = "Standard_A11"
standard_d1 = "Standard_D1"
standard_d2 = "Standard_D2"
standard_d3 = "Standard_D3"
standard_d4 = "Standard_D4"
standard_d11 = "Standard_D11"
standard_d12 = "Standard_D12"
standard_d13 = "Standard_D13"
standard_d14 = "Standard_D14"
standard_d1_v2 = "Standard_D1_v2"
standard_d2_v2 = "Standard_D2_v2"
standard_d3_v2 = "Standard_D3_v2"
standard_d4_v2 = "Standard_D4_v2"
standard_d5_v2 = "Standard_D5_v2"
standard_d11_v2 = "Standard_D11_v2"
standard_d12_v2 = "Standard_D12_v2"
standard_d13_v2 = "Standard_D13_v2"
standard_d14_v2 = "Standard_D14_v2"
standard_g1 = "Standard_G1"
standard_g2 = "Standard_G2"
standard_g3 = "Standard_G3"
standard_g4 = "Standard_G4"
standard_g5 = "Standard_G5"
standard_ds1 = "Standard_DS1"
standard_ds2 = "Standard_DS2"
standard_ds3 = "Standard_DS3"
standard_ds4 = "Standard_DS4"
standard_ds11 = "Standard_DS11"
standard_ds12 = "Standard_DS12"
standard_ds13 = "Standard_DS13"
standard_ds14 = "Standard_DS14"
standard_gs1 = "Standard_GS1"
standard_gs2 = "Standard_GS2"
standard_gs3 = "Standard_GS3"
standard_gs4 = "Standard_GS4"
standard_gs5 = "Standard_GS5"
| [
"[email protected]"
] | |
985dad9eac8bbe27fa5b3adfb04734809e871ce4 | ae16f9dd815605e5f52f27dda77bd735abafb587 | /parser/councilors/elections_config.py | 1bf0e2a7db9c45f4024b1026e1cd6c38e1f368c0 | [
"CC0-1.0"
] | permissive | travishen/councilor-voter-guide | aa4a1aa3b86db9ca40b291baf461ff0330a369c0 | 09d9365676335854b2d4d0981f5cb925adf4c958 | refs/heads/master | 2020-04-13T10:09:07.688276 | 2018-11-28T14:51:05 | 2018-11-28T14:51:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,827 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import re
import json
import psycopg2
import ast
from sys import argv
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import db_settings
conn = db_settings.con()
c = conn.cursor()
election_year = ast.literal_eval(argv[1])['election_year']
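# Strip the residency prefix and any aboriginal-constituency suffix from a raw
# district string, then split the remaining district names on u'、'.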
def parse_districts(county, districts):
districts = re.sub(u'^(居住|【)', '', districts)
category = re.search(u'(平地原住民|山地原住民)$', districts)
districts = re.sub(u'(平地原住民|山地原住民)$', '', districts)
if category:
category = category.group()
districts = re.sub(u'(】|之)', '', districts)
l = []
if districts:
for district in districts.split(u'、'):
if len(district) == 2:
l = districts.split(u'、')
break
if not re.search(re.sub(u'[縣市]$', '', county), district):
district = re.sub(u'[鄉鎮市區]$', '', district)
l.append(district)
return l, category
# update constituencies
constituencies = json.load(open('../../voter_guide/static/json/dest/constituencies_%s.json' % election_year))
counties = {}
for region in constituencies:
if region['county'] not in counties.keys():
counties.update({
region['county']: {
'regions': [],
'duplicated': []
}
})
districts_list, category = parse_districts(region['county'], region['district'])
if category:
if districts_list:
district = u'%s(%s)' % (category, u'、'.join(districts_list))
else:
district = u'%s(%s)' % (category, u'全%s' % region['county'])
else:
district = u'、'.join(districts_list)
counties[region['county']]['regions'].append({
'constituency': region['constituency'],
'districts_list': districts_list,
'district': district,
'category': category
})
c.execute('''
update candidates_terms
set district = %s
where election_year = %s and county = %s and constituency = %s
''', (district, election_year, region['county'], region['constituency']))
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('credential.json', scope)
gc = gspread.authorize(credentials)
sh = gc.open_by_key('10zFDmMF9CJDXSIENXO8iJXKE5CLBY62i_mSeqe_qDug')
worksheets = sh.worksheets()
for wks in worksheets:
rows = wks.get_all_records()
if wks.title == u'議員':
for row in rows:
print row['county'], row['constituency']
if row['count_this']:
counties[row['county']]['regions'][int(row['constituency'])-1]['elected_count_pre'] = row['count_pre']
counties[row['county']]['regions'][int(row['constituency'])-1]['elected_count'] = row['count_this']
counties[row['county']]['regions'][int(row['constituency'])-1]['reserved_seats'] = row['reserved_seats']
else:
continue
config = json.dumps({'constituencies': counties})
c.execute('''
INSERT INTO elections_elections(id, data)
VALUES (%s, %s)
ON CONFLICT (id)
DO UPDATE
SET data = (COALESCE(elections_elections.data, '{}'::jsonb) || %s::jsonb)
''', [election_year, config, config])
conn.commit()
# update constituency_change
district_versions = json.load(open('../district_versions.json'))
config = json.dumps({'constituency_change': district_versions.get(election_year, {})})
c.execute('''
INSERT INTO elections_elections(id, data)
VALUES (%s, %s)
ON CONFLICT (id)
DO UPDATE
SET data = (COALESCE(elections_elections.data, '{}'::jsonb) || %s::jsonb)
''', [election_year, config, config])
conn.commit()
| [
"[email protected]"
] | |
88842d784deeecde1c87e82ab837462e8ead03f9 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ZZsnGAjYLyosG9zmH_12.py | aa9e707c08223592b4481ac84b90ac438ecda630 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py |
def flash(fc):
var1, oper, var2 = fc
return var1 + var2 if oper == '+' else var1 - var2 if oper == '-' else var1 * var2 if oper == 'x' else round(var1 / var2, 2) if oper == '/' and var2 else None
| [
"[email protected]"
] | |
3f37df8301b6e1dbb044c648cb837c0f03ffdbc6 | a1582cec6239f627c6740b391d751f429675ee39 | /test_todo.py | 039a3c22c18438751c553f7c5c877b02e940182e | [] | no_license | SolbiatiAlessandro/todos | 7cabfd35d6c7d3cdd3232051be4a96c667d55f21 | b85e74c4fc220dccc5a0a05a288465b2da98f6d0 | refs/heads/master | 2020-03-28T18:56:09.847298 | 2018-10-15T15:07:01 | 2018-10-15T15:07:01 | 148,928,531 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import unittest
import todo
from os import path
dir_path = path.dirname(path.realpath(__file__))
class testTODO( unittest.TestCase ):
def test_readElems( self ):
self.assertIsNotNone( todo.readElems() )
def test_todoDone( self ):
with open(dir_path+'/todos','a') as f:
f.write('"[test elem]" 0')
#import pdb;pdb.set_trace()
elems = todo.readElems()
self.assertEqual( "[test elem]", elems[0][1] )
todo.todoDone()
elems = todo.readElems()
self.assertNotEqual( "[test elem]", elems[0][1] )
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
bec5d5fbb09b6260d514209bc438f344d215832b | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=30/sched.py | a85202e958d39e172c17afa700742b708255c6d6 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | -S 0 -X RUN -Q 0 -L 2 84 250
-S 1 -X RUN -Q 0 -L 2 80 250
-S 0 -X RUN -Q 0 -L 2 74 250
-S 0 -X RUN -Q 0 -L 2 59 250
-S 2 -X RUN -Q 1 -L 1 57 200
-S 2 -X RUN -Q 1 -L 1 48 175
-S 2 -X RUN -Q 1 -L 1 40 125
-S 2 -X RUN -Q 1 -L 1 33 300
-S 3 -X RUN -Q 2 -L 1 29 100
-S 3 -X RUN -Q 2 -L 1 27 125
-S 3 -X RUN -Q 2 -L 1 21 100
-S 3 -X RUN -Q 2 -L 1 19 150
-S 4 -X RUN -Q 3 -L 1 19 100
-S 4 -X RUN -Q 3 -L 1 15 100
-S 4 -X RUN -Q 3 -L 1 14 100
| [
"[email protected]"
] | |
c2abb820a33634fbd4d2baa8cc40894fd5ffc9db | afea9757be324c8def68955a12be11d71ce6ad35 | /willyanealves/customer_service/migrations/0018_remove_customerservice_serviceitem.py | 5389c0887e5e5598bfdb43884190c5126c6d8681 | [] | no_license | bergpb/willyane-alves | c713cac3ec3a68005f3b8145985693d2477ba706 | 8b2b9922ba35bf2043f2345228f03d80dbd01098 | refs/heads/master | 2023-02-10T19:57:50.893172 | 2021-01-11T16:17:14 | 2021-01-11T16:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # Generated by Django 3.1.2 on 2020-11-16 14:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('customer_service', '0017_auto_20201116_1115'),
]
operations = [
migrations.RemoveField(
model_name='customerservice',
name='serviceitem',
),
]
| [
"[email protected]"
] | |
c5764734108e5118eb033f9417b70073be8ac9a0 | 28541d61368a14a0d5003db4cc07fed21b40c41f | /Chapter-4/maze3.py | 2a2bcf9a00c029002b258874bd88cd10f9fc123a | [] | no_license | eizin6389/python_algorithm | 390861f9342ce907f2cda0b45b84d364bcba7541 | abf3588ed97a343b6559eb5d69156708d42bc243 | refs/heads/master | 2022-12-06T20:48:49.470312 | 2020-08-14T13:29:26 | 2020-08-14T13:29:26 | 282,905,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | maze = [
[9,9,9,9,9,9,9,9,9,9,9,9],
[9,0,0,0,9,0,0,0,0,0,0,9],
[9,0,9,0,0,0,9,9,0,9,9,9],
[9,0,9,9,0,9,0,0,0,9,0,9],
[9,0,0,0,9,0,0,9,9,0,9,9],
[9,9,9,0,0,9,0,9,0,0,0,9],
[9,0,0,0,9,0,9,0,0,9,1,9],
[9,0,9,0,0,0,0,9,0,0,9,9],
[9,0,0,9,0,9,0,0,9,0,0,9],
[9,0,9,0,9,0,9,0,0,9,0,9],
[9,0,0,0,0,0,0,9,0,0,0,9],
[9,9,9,9,9,9,9,9,9,9,9,9]
]
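# Wall-following walk: 9 = wall, 0 = unvisited, 1 = goal, 2 = visited.
# Each step first tries the direction one slot before the current heading;
# moving onto a fresh cell increments depth, re-entering a visited cell
# decrements it, so depth ends as the net path length when the goal is reached.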
dir = [[1,0],[0,1],[-1,0],[0,-1]]
x, y, depth, d = 1, 1, 0, 0
while maze[x][y] != 1:
maze[x][y] = 2
for i in range(len(dir)):
j = (d + i - 1) % len(dir)
if maze[x + dir[j][0]][y + dir[j][1]] < 2:
x += dir[j][0]
y += dir[j][1]
d = j
depth += 1
break
elif maze[x + dir[j][0]][y + dir[j][1]] == 2:
x += dir[j][0]
y += dir[j][1]
d = j
depth -= 1
break
print(depth)
| [
"[email protected]"
] | |
3eaa1551407f554655a52f1b22c4d721669fa579 | 3e6e18edfe81bb19e298ae4e1831cb76c2c6069d | /src/lpcshop/models/bottles.py | a4dba719454dd661eebe4d48daada55e5b64e9f8 | [] | no_license | libertalia/lpc | 2e72de7eee36cd92d62e4d250186bda2353c179a | 972343abdcffffc2bec0cac4e2057c91edfa1716 | refs/heads/master | 2023-01-07T08:13:02.708844 | 2016-05-13T01:34:57 | 2016-05-13T01:34:57 | 58,680,165 | 0 | 1 | null | 2022-12-26T19:59:29 | 2016-05-12T22:02:24 | JavaScript | UTF-8 | Python | false | false | 2,117 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.encoding import python_2_unicode_compatible
from djangocms_text_ckeditor.fields import HTMLField
from shop.money.fields import MoneyField
from shop.models.product import BaseProduct, BaseProductManager
from shop.models.defaults.mapping import ProductPage, ProductImage
@python_2_unicode_compatible
class Bottle(BaseProduct):
# common product fields
product_name = models.CharField(max_length=255, verbose_name=_("Product Name"))
slug = models.SlugField(verbose_name=_("Slug"))
unit_price = MoneyField(_("Unit price"), decimal_places=3,
help_text=_("Net price for this product"))
description = HTMLField(verbose_name=_("Description"),
help_text=_("Description for the list view of products."))
# controlling the catalog
order = models.PositiveIntegerField(verbose_name=_("Sort by"), db_index=True)
cms_pages = models.ManyToManyField('cms.Page', through=ProductPage,
help_text=_("Choose list view this product shall appear on."))
images = models.ManyToManyField('filer.Image', through=ProductImage)
objects = BaseProductManager()
# filter expression used to search for a product item using the Select2 widget
lookup_fields = ('product_name__icontains',)
class Meta:
verbose_name = _("Bottle")
ordering = ('order',)
def __str__(self):
return self.product_name
@property
def sample_image(self):
return self.images.first()
def get_price(self, request):
return self.unit_price
def get_absolute_url(self):
# sorting by highest level, so that the canonical URL associates with the
# most generic category
cms_page = self.cms_pages.order_by('depth').last()
if cms_page is None:
return urljoin('category-not-assigned', self.slug)
return urljoin(cms_page.get_absolute_url(), self.slug)
| [
"[email protected]"
] | |
07030cbb64db6488b93f8e7f03c975d1d39c099d | df5cd640098a10e754a9552187fc5ad8c50df90c | /colour/examples/algebra/examples_interpolation.py | 4acf509db6a9fd00459d7e4bce455a3a20c6b8ca | [
"BSD-3-Clause"
] | permissive | ofek/colour | d4963c9b77b0d119cf3ef3296dbf5369167472df | 04f4863ef49093a93244c1fedafd1d5e2b1b76da | refs/heads/develop | 2021-07-08T05:33:14.220392 | 2017-09-29T22:34:14 | 2017-09-29T22:34:14 | 105,406,461 | 0 | 0 | null | 2017-09-30T23:06:18 | 2017-09-30T23:06:18 | null | UTF-8 | Python | false | false | 3,265 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Showcases interpolation computations.
"""
import pylab
import colour
from colour.plotting import * # noqa
from colour.utilities.verbose import message_box
message_box('Interpolation Computations')
message_box(('Comparing "Sprague (1880)" and "Cubic Spline" recommended '
'interpolation methods to "Pchip" method.'))
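# colour picks "Sprague (1880)" for uniformly sampled data and "Cubic Spline" for
# non-uniform data, per the CIE recommendation; Pchip is computed for comparison.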
uniform_spd_data = {
340: 0.0000,
360: 0.0000,
380: 0.0000,
400: 0.0641,
420: 0.0645,
440: 0.0562,
460: 0.0537,
480: 0.0559,
500: 0.0651,
520: 0.0705,
540: 0.0772,
560: 0.0870,
580: 0.1128,
600: 0.1360,
620: 0.1511,
640: 0.1688,
660: 0.1996,
680: 0.2397,
700: 0.2852,
720: 0.0000,
740: 0.0000,
760: 0.0000,
780: 0.0000,
800: 0.0000,
820: 0.0000
}
non_uniform_spd_data = {
340.1: 0.0000,
360: 0.0000,
380: 0.0000,
400: 0.0641,
420: 0.0645,
440: 0.0562,
460: 0.0537,
480: 0.0559,
500: 0.0651,
520: 0.0705,
540: 0.0772,
560: 0.0870,
580: 0.1128,
600: 0.1360,
620: 0.1511,
640: 0.1688,
660: 0.1996,
680: 0.2397,
700: 0.2852,
720: 0.0000,
740: 0.0000,
760: 0.0000,
780: 0.0000,
800: 0.0000,
820.9: 0.0000
}
base_spd = colour.SpectralPowerDistribution('Reference', uniform_spd_data)
uniform_interpolated_spd = colour.SpectralPowerDistribution(
'Uniform - Sprague Interpolation', uniform_spd_data)
uniform_pchip_interpolated_spd = colour.SpectralPowerDistribution(
'Uniform - Pchip Interpolation', uniform_spd_data)
non_uniform_interpolated_spd = colour.SpectralPowerDistribution(
'Non Uniform - Cubic Spline Interpolation', non_uniform_spd_data)
uniform_interpolated_spd.interpolate(colour.SpectralShape(interval=1))
uniform_pchip_interpolated_spd.interpolate(
colour.SpectralShape(interval=1), method='Pchip')
non_uniform_interpolated_spd.interpolate(colour.SpectralShape(interval=1))
shape = base_spd.shape
x_limit_min, x_limit_max, y_limit_min, y_limit_max = [], [], [], []
pylab.plot(
base_spd.wavelengths,
base_spd.values,
'ro-',
label=base_spd.name,
linewidth=2)
pylab.plot(
uniform_interpolated_spd.wavelengths,
uniform_interpolated_spd.values,
label=uniform_interpolated_spd.name,
linewidth=2)
pylab.plot(
uniform_pchip_interpolated_spd.wavelengths,
uniform_pchip_interpolated_spd.values,
label=uniform_pchip_interpolated_spd.name,
linewidth=2)
pylab.plot(
non_uniform_interpolated_spd.wavelengths,
non_uniform_interpolated_spd.values,
label=non_uniform_interpolated_spd.name,
linewidth=2)
x_limit_min.append(shape.start)
x_limit_max.append(shape.end)
y_limit_min.append(min(base_spd.values))
y_limit_max.append(max(base_spd.values))
settings = {
'x_label':
'Wavelength $\\lambda$ (nm)',
'y_label':
'Spectral Power Distribution',
'x_tighten':
True,
'legend':
True,
'legend_location':
'upper left',
'x_ticker':
True,
'y_ticker':
True,
'limits': (min(x_limit_min), max(x_limit_max), min(y_limit_min),
max(y_limit_max))
}
boundaries(**settings)
decorate(**settings)
display(**settings)
| [
"[email protected]"
] | |
467775b4bd0bdc529f7af369a772db9776c3f4d4 | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /atcoder/corp/dwacon6_a.py | 0f60c706492fb0f7e55329255dd53fcbe06cb6d9 | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | N = int(input())
titles, times = [], []
for _ in range(N):
s, t = input().split()
titles.append(s)
times.append(int(t))
idx = titles.index(input())
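# The answer is the total duration of every song after the one that was playing.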
ans = 0
for i in range(idx+1, N):
ans += times[i]
print(ans)
| [
"[email protected]"
] | |
396451adf046ae9a1e9a93d08c731002c02b4a78 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AntMerchantExpandIndirectOnlineModifyResponse.py | deb575d50e426359ce6993ae14225946249fa464 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AntMerchantExpandIndirectOnlineModifyResponse(AlipayResponse):
def __init__(self):
super(AntMerchantExpandIndirectOnlineModifyResponse, self).__init__()
self._sub_merchant_id = None
@property
def sub_merchant_id(self):
return self._sub_merchant_id
@sub_merchant_id.setter
def sub_merchant_id(self, value):
self._sub_merchant_id = value
def parse_response_content(self, response_content):
response = super(AntMerchantExpandIndirectOnlineModifyResponse, self).parse_response_content(response_content)
if 'sub_merchant_id' in response:
self.sub_merchant_id = response['sub_merchant_id']
| [
"[email protected]"
] | |
4b664002f3b91925204f95cf5afde92db89ca9f4 | 154e563104144721865a90987db0332bef08a4c3 | /rh_aligner/plotting/__init__.py | 8187d6023c6db7b60a29f9fbf00456387099c256 | [
"MIT"
] | permissive | Rhoana/rh_aligner | 565572d645769053c74a36ddf0f53ecc20d997fe | baab698f6520b9b999bccf423dc510b0c8f4b9bb | refs/heads/master | 2021-01-01T05:29:25.406459 | 2016-05-09T15:34:58 | 2016-05-09T15:34:58 | 56,165,015 | 3 | 3 | null | 2016-05-05T20:00:26 | 2016-04-13T15:43:33 | Python | UTF-8 | Python | false | false | 281 | py | """
Plotting of the stitching and alignment steps library
- to deubg the steps
"""
from .view_pre_pmcc_mfov import view_pre_pmcc_mfov
from .view_post_pmcc_mfov import view_post_pmcc_mfov
__all__ = [
'view_pre_pmcc_mfov',
'view_post_pmcc_mfov'
]
| [
"[email protected]"
] | |
25824908e100267109197ad1c04cca8d349a6f10 | 8cf0cf9b71b7c5fbaa150e9893bf461ef661045e | /ownblock/ownblock/apps/parking/models.py | 84c75498d4e8e94365b81a282ee43d877a925a7d | [
"MIT"
] | permissive | danjac/ownblock | 676b27a5aa0d4ce2ac2cd924a632489cd6fc21ee | ac662fb7efb2f04567e2f85638c1250286452611 | refs/heads/master | 2016-08-02T21:51:56.055598 | 2015-05-02T12:54:47 | 2015-05-02T12:54:47 | 34,940,828 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | from django.conf import settings
from django.db import models
from django_countries.fields import CountryField
class Vehicle(models.Model):
description = models.CharField(max_length=100)
registration_number = models.CharField(max_length=12)
country = CountryField(default="FI")
resident = models.ForeignKey(settings.AUTH_USER_MODEL)
reserved_place = models.CharField(max_length=12, blank=True)
def __str__(self):
return self.registration_number
def get_groups(self):
return [self.resident,
self.resident.apartment.building.site.group]
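    # Residents may act only on their own vehicles; managers may act on any
    # vehicle registered at a building on their site.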
def has_permission(self, user, perm):
if user.role == 'resident':
return user == self.resident
if user.role == 'manager':
return (self.resident.apartment.building.site_id ==
user.site_id)
return False
| [
"[email protected]"
] | |
fd6eb4ffc23f7389f26fd2d60442434609b29286 | 5f814192b19721dc9c06e0e9595738b0f8561233 | /OCR/east_text_detection.py | f8bee5a5e3d5d1b813617866d1b192837295a2ef | [] | no_license | irischo/civil_translation | 7b3c5c58e201f74547d5ae21123fdfd9d4bc5e64 | 240638a434957ea25cfac262da93fc23e292f6f2 | refs/heads/master | 2022-11-23T15:32:50.503095 | 2020-07-29T00:40:06 | 2020-07-29T00:40:06 | 283,387,321 | 0 | 0 | null | 2020-07-29T03:21:50 | 2020-07-29T03:21:49 | null | UTF-8 | Python | false | false | 2,829 | py | from imutils.object_detection import non_max_suppression
import numpy as np
import argparse
import time
import cv2
# argument parse
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', type=str, help='path to input image')
ap.add_argument('-east', '--east', type=str, help='path to input EAST text detector')
ap.add_argument('-c', '--min-confidence', type=float, default=0.5, help='minimum probability required to inspect a region')
ap.add_argument('-w', '--width', type=int, default=320, help='resized image width (should be multiple of 32)')
ap.add_argument('-e', '--height', type=int, default=320, help='resized image height (should be multiple of 32)')
args = vars(ap.parse_args())
# load image
image = cv2.imread(args['image'])
orig = image.copy()
(H, W) = image.shape[:2]
# set new width & height
(newW, newH) = (args['width'], args['height'])
rW = W / float(newW)
rH = H / float(newH)
# resize image
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
layerNames = [
'feature_fusion/Conv_7/Sigmoid', # text or not check
'feature_fusion/concat_3' # image geometry
]
# load pre-trained EAST text decorator (from frozen_east_text_detection.pb)
print('[INFO] loading EAST text detector ...')
net = cv2.dnn.readNet(args['east'])
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False)  # swapRB converts BGR to RGB
start = time.time()
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
end = time.time()
# show timing information on text prediction
print("[INFO] text detection took {:.6f} seconds".format(end - start))
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
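# Decode the EAST geometry map: for each grid cell, xData0..3 hold distances to
# the box's top/right/bottom/left edges and anglesData holds its rotation.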
for y in range(0, numRows):
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
for x in range(0, numCols):
if scoresData[x] < args['min_confidence']:
continue
(offsetX, offsetY) = (x * 4.0, y * 4.0)
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
boxes = non_max_suppression(np.array(rects), probs=confidences)
for (startX, startY, endX, endY) in boxes:
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)
cv2.imshow('Text Detection', orig)
cv2.waitKey(0)
| [
"[email protected]"
] | |
564f224574f406c1a966ab5582a316627e5a9ae1 | 2cfa657fd119a23de2a5c2ae6d55e6d2516bae2d | /test/functional/wallet_keypool_topup.py | 1c1aa4fe3a776fdc70d840768a3b9deacdbccf53 | [
"MIT"
] | permissive | vivuscoin/vivuscoin | 640b10ae3a72c03b501e03b07caae09ce6c87c81 | ba0db89712234bf68b2d6b63ef2c420d65c7c25d | refs/heads/master | 2023-05-07T06:26:26.241247 | 2021-05-25T03:54:32 | 2021-05-25T03:54:32 | 362,198,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,779 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Copyright (c) 2021 The Vivuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
from test_framework.test_framework import VivuscoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_blocks,
)
class KeypoolRestoreTest(VivuscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
wallet_path = os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat")
wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
self.nodes[0].generate(101)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(wallet_path, wallet_backup_path)
self.start_node(1, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
self.log.info("Generate keys for wallet with address type: {}".format(output_type))
idx = i+1
for _ in range(90):
addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
for _ in range(20):
addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
# Make sure we're creating the outputs we expect
address_details = self.nodes[idx].validateaddress(addr_extpool)
if i == 0:
assert(not address_details["isscript"] and not address_details["iswitness"])
elif i == 1:
assert(address_details["isscript"] and not address_details["iswitness"])
else:
assert(not address_details["isscript"] and address_details["iswitness"])
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.log.info("Restart node with wallet backup")
self.stop_node(idx)
shutil.copyfile(wallet_backup_path, wallet_path)
self.start_node(idx, self.extra_args[idx])
connect_nodes_bi(self.nodes, 0, idx)
self.sync_all()
self.log.info("Verify keypool is restored and balance is correct")
assert_equal(self.nodes[idx].getbalance(), 15)
assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress())['hdkeypath'], "m/0'/0'/110'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
| [
"[email protected]"
] | |
12554a6f358810f3d7bcf732d99807639d1b65bf | 22ebdd6881730a9474ede8e8167c615990c4e275 | /prob17a.py | e5cae5301008b8a21864cb95ac76154a72222942 | [] | no_license | MMohan1/eueler | a96a465b265334b03645f2e2bb66c85395c54e75 | 05a88f1c9b41fbc3d6bcd95b38b83a6510b3b50a | refs/heads/master | 2021-01-18T15:14:35.320214 | 2015-02-02T11:02:06 | 2015-02-02T11:02:06 | 15,935,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | def prob17():
list1=[]
total_char=0
dict1={0:'',1:'one',2:'two',3:'three',4:'four',5:'five',6:'six',7:'seven',8:'eight',9:'nine',10:'ten'}
dict2={11:'eleven',12:'twelve',13:'thirteen',14:'fourteen',15:'fifteen',16:'sixteen',17:'seventeen',18:'eighteen',19:'nineteen'}
    dict3={0:'',1:'ten',2:'twenty',3:'thirty',4:'forty',5:'fifty',6:'sixty',7:'seventy',8:'eighty',9:'ninety'}
for i in range(1,100):
x=str(i)
if len(x) == 1:
list1.append(dict1[i])
elif len(x) == 2 and x[1] == '0':
list1.append(dict3[int(x[0])])
elif len(x) == 2 and x[0] == '1':
list1.append(dict2[i])
elif len(x) == 2:
list1.append(dict3[int(x[0])]+dict1[int(x[1])])
p = sum([len(i) for i in list1])
print list1,p
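    # 100-999: each "XhundredandY" costs len(X) + len('hundredand') = len(X) + 10
    # letters plus len(Y), giving the 13/14/15-per-number terms below (digit names
    # of length 3, 4 and 5, three of each); the trailing +99 covers the nine exact
    # hundreds ("Xhundred"), and len('onethousand') + p finishes 1000 and 1-99.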
k = 3*((13*99)+p) + 3*((14*99)+p) + 3*((15*99)+p) + len('onethousand') + p + 99
print k
if __name__ == '__main__':
prob17()
| [
"[email protected]"
] | |
dada702324b30a4d4a00d067c7b3c97d8b05129b | b8ef1a5cd3856a8e9134c3313a4e23522f199df7 | /Baekjoon/1966_프린터 큐/1966_프린터 큐.py | 73d38dbd9d6bdf4902738765108954a7e7151128 | [] | no_license | scl2589/Algorithm_problem_solving | 910623d9675ae0219320abfd1fefc7d576027544 | 80db697cdd0180a7d4dbcfae4944d4a54191bddf | refs/heads/master | 2023-07-29T10:56:38.225206 | 2021-09-11T13:50:46 | 2021-09-11T13:50:46 | 235,363,353 | 0 | 0 | null | 2021-03-04T15:39:41 | 2020-01-21T14:36:41 | Python | UTF-8 | Python | false | false | 548 | py | from collections import deque
tc = int(input())
for _ in range(tc):
N, M = map(int, input().split())
impt = list(map(int, input().split()))
q = deque()
for idx, value in enumerate(impt):
q.append([idx, value])
count = 0
while True:
max_num = sorted(q, key = lambda x: x[1], reverse=True)[0][1]
num = q.popleft()
if num[0] == M and num[1] == max_num:
break
elif num[1] == max_num:
count += 1
else:
q.append(num)
print(count + 1)
| [
"[email protected]"
] | |
4e6dc77e570b5419eef0fc74fd16710afdfd3235 | 190d03cf370844548b9e8c89952dfbaec4d0c5c8 | /p103.py | 467aee99fa0ff340b0a00d481a047ab36a7d0d52 | [] | no_license | alainlou/leetcode | 446d101a9fd2f9eaa2229252e5909e7df36b4a74 | fe500bcb067be59aa048259e3860e9da6f98344d | refs/heads/master | 2022-10-16T12:20:44.726963 | 2022-09-18T15:29:05 | 2022-09-18T15:29:05 | 178,775,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from DS.TreeNode import TreeNode
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if root is None:
return []
ans = []
q = [(root, 0)]
while len(q) > 0:
curr = q.pop(0)
if len(ans) <= curr[1]:
ans.append([])
ans[-1].append(curr[0].val)
if curr[0].left:
q.append((curr[0].left, curr[1]+1))
if curr[0].right:
q.append((curr[0].right, curr[1]+1))
for i in range(1, len(ans), 2):
ans[i] = ans[i][::-1]
return ans
| [
"[email protected]"
] | |
5df906375ee0c7d24ede8dd570122ce0cbdd1251 | 9bdc5bd0b6195761fbceed17c0725bc48a5941a1 | /testing/keras_taylor_1D.py | f24a460e11be29c65a55abbe497af20fe014f122 | [] | no_license | lapsintra/htt-ml | bc6bbb12eda4a3f0abbc5c0db13940a31b667a08 | ce07cad6fcc8625b1595157de6486759b74f6d62 | refs/heads/master | 2020-04-05T16:29:29.858916 | 2018-12-04T19:32:10 | 2018-12-04T19:32:10 | 157,015,043 | 0 | 0 | null | 2018-11-10T19:38:56 | 2018-11-10T19:38:56 | null | UTF-8 | Python | false | false | 7,871 | py | #!/usr/bin/env python
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True # disable ROOT internal argument parser
import argparse
from array import array
import yaml
import pickle
import numpy as np
import os
import sys
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams['font.size'] = 16
import matplotlib.pyplot as plt
from matplotlib import cm
from keras.models import load_model
import tensorflow as tf
from tensorflow_derivative.inputs import Inputs
from tensorflow_derivative.outputs import Outputs
from tensorflow_derivative.derivatives import Derivatives
import logging
logger = logging.getLogger("keras_taylor_1D")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def parse_arguments():
parser = argparse.ArgumentParser(description="Produce confusion matrice")
parser.add_argument("config_training", help="Path to training config file")
parser.add_argument("config_testing", help="Path to testing config file")
parser.add_argument("fold", type=int, help="Trained model to be tested.")
parser.add_argument(
"--no-abs",
action="store_true",
default=False,
help="Do not use abs for metric.")
parser.add_argument(
"--normalize",
action="store_true",
default=False,
help="Normalize rows.")
return parser.parse_args()
def parse_config(filename):
logger.debug("Load config %s.", filename)
return yaml.load(open(filename, "r"))
def main(args, config_test, config_train):
# Load preprocessing
path = os.path.join(config_train["output_path"],
config_test["preprocessing"][args.fold])
logger.info("Load preprocessing %s.", path)
preprocessing = pickle.load(open(path, "rb"))
# Load Keras model
path = os.path.join(config_train["output_path"],
config_test["model"][args.fold])
logger.info("Load keras model %s.", path)
model_keras = load_model(path)
# Get TensorFlow graph
inputs = Inputs(config_train["variables"])
try:
sys.path.append("htt-ml/training")
import keras_models
except:
logger.fatal("Failed to import Keras models.")
raise Exception
try:
name_keras_model = config_train["model"]["name"]
model_tensorflow_impl = getattr(
keras_models, config_train["model"]["name"] + "_tensorflow")
except:
logger.fatal(
"Failed to load TensorFlow version of Keras model {}.".format(
name_keras_model))
raise Exception
model_tensorflow = model_tensorflow_impl(inputs.placeholders, model_keras)
outputs = Outputs(model_tensorflow, config_train["classes"])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Get operations for first-order derivatives
deriv_ops = {}
derivatives = Derivatives(inputs, outputs)
for class_ in config_train["classes"]:
deriv_ops[class_] = []
for variable in config_train["variables"]:
deriv_ops[class_].append(derivatives.get(class_, [variable]))
# Loop over testing dataset
path = os.path.join(config_train["datasets"][(1, 0)[args.fold]])
logger.info("Loop over test dataset %s to get model response.", path)
file_ = ROOT.TFile(path)
mean_abs_deriv = {}
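    # For each class, average |d(output)/d(input)| over events (event-weighted):
    # these first-order Taylor coefficients serve as feature-importance scores below.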
for i_class, class_ in enumerate(config_train["classes"]):
logger.debug("Process class %s.", class_)
tree = file_.Get(class_)
if tree == None:
logger.fatal("Tree %s does not exist.", class_)
raise Exception
values = []
for variable in config_train["variables"]:
typename = tree.GetLeaf(variable).GetTypeName()
if typename == "Float_t":
values.append(array("f", [-999]))
elif typename == "Int_t":
values.append(array("i", [-999]))
else:
logger.fatal("Variable {} has unknown type {}.".format(variable, typename))
raise Exception
tree.SetBranchAddress(variable, values[-1])
        if tree.GetLeaf(config_test["weight_branch"]).GetTypeName() != "Float_t":
            logger.fatal("Weight branch has unknown type.")
            raise Exception
weight = array("f", [-999])
tree.SetBranchAddress(config_test["weight_branch"], weight)
deriv_class = np.zeros((tree.GetEntries(),
len(config_train["variables"])))
weights = np.zeros((tree.GetEntries()))
for i_event in range(tree.GetEntries()):
tree.GetEntry(i_event)
# Preprocessing
values_stacked = np.hstack(values).reshape(1, len(values))
values_preprocessed = preprocessing.transform(values_stacked)
# Keras inference
response = model_keras.predict(values_preprocessed)
response_keras = np.squeeze(response)
# Tensorflow inference
response = sess.run(
model_tensorflow,
feed_dict={
inputs.placeholders: values_preprocessed
})
response_tensorflow = np.squeeze(response)
# Check compatibility
mean_error = np.mean(np.abs(response_keras - response_tensorflow))
if mean_error > 1e-5:
logger.fatal(
"Found mean error of {} between Keras and TensorFlow output for event {}.".
format(mean_error, i_event))
raise Exception
# Calculate first-order derivatives
deriv_values = sess.run(
deriv_ops[class_],
feed_dict={
inputs.placeholders: values_preprocessed
})
deriv_values = np.squeeze(deriv_values)
deriv_class[i_event, :] = deriv_values
# Store weight
weights[i_event] = weight[0]
if args.no_abs:
mean_abs_deriv[class_] = np.average((deriv_class), weights=weights, axis=0)
else:
mean_abs_deriv[class_] = np.average(np.abs(deriv_class), weights=weights, axis=0)
# Normalize rows
classes = config_train["classes"]
matrix = np.vstack([mean_abs_deriv[class_] for class_ in classes])
if args.normalize:
for i_class, class_ in enumerate(classes):
matrix[i_class, :] = matrix[i_class, :] / np.sum(
matrix[i_class, :])
# Plotting
variables = config_train["variables"]
plt.figure(0, figsize=(len(variables), len(classes)))
axis = plt.gca()
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
axis.text(
j + 0.5,
i + 0.5,
'{:.2f}'.format(matrix[i, j]),
ha='center',
va='center')
q = plt.pcolormesh(matrix, cmap='Wistia')
#cbar = plt.colorbar(q)
#cbar.set_label("mean(abs(Taylor coefficients))", rotation=270, labelpad=20)
plt.xticks(
np.array(range(len(variables))) + 0.5, variables, rotation='vertical')
plt.yticks(
np.array(range(len(classes))) + 0.5, classes, rotation='horizontal')
plt.xlim(0, len(config_train["variables"]))
plt.ylim(0, len(config_train["classes"]))
output_path = os.path.join(config_train["output_path"],
"fold{}_keras_taylor_1D.png".format(args.fold))
logger.info("Save plot to {}.".format(output_path))
plt.savefig(output_path, bbox_inches='tight')
if __name__ == "__main__":
args = parse_arguments()
config_test = parse_config(args.config_testing)
config_train = parse_config(args.config_training)
main(args, config_test, config_train)
| [
"[email protected]"
] | |
fc07829f755d7e6cdcfbb45f1595dfd39618bdaa | 6d69b249a81e076d79787dd08eb8957908052052 | /libs/parse/sections/grouper_mixins/blocks.py | 154423ab5e2c3ebe6544db079d4af4ebccbedda5 | [] | no_license | 2vitalik/wiktionary | 02ee1f1327c3b82fc7b4d7da12083b1431b1eb8b | 8edae2f7dcf9089084c5ce7033c4fb0b454f4dfa | refs/heads/master | 2023-02-06T11:28:41.554604 | 2023-02-05T22:49:01 | 2023-02-05T22:49:01 | 121,025,447 | 7 | 2 | null | 2021-10-13T17:36:32 | 2018-02-10T15:06:24 | Lua | UTF-8 | Python | false | false | 486 | py | from libs.parse.groupers.sections.blocks.any_blocks import AnyBlocksGrouper
from libs.parse.groupers.sections.blocks.blocks import BlocksGrouper
from libs.parse.sections.grouper_mixins.sub_blocks import SubBlocksGroupersMixin
from libs.parse.utils.decorators import parsed
class BlocksGroupersMixin(SubBlocksGroupersMixin):
@property
@parsed
def blocks(self):
return BlocksGrouper(self)
@parsed
def any_blocks(self):
return AnyBlocksGrouper(self)
| [
"[email protected]"
] | |
2e6669b326e3edda7be0bf7c377f290405bcf0c3 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2671/60590/241334.py | 25e5fee553d7db62878bfa4165d65ebdfde7331c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | def cal(w):
if w==1:
return 2
if w==2:
return 3
elif w==3:
return 5
else:
w2=2
w3=3
for i in range(w-2):
temp=w2+w3
w2=w3
w3=temp
return temp
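# cal(w) follows the Fibonacci-style sequence 2, 3, 5, 8, ... (the count of
# length-w binary strings with no two adjacent 1s), so 2**w - cal(w) counts
# the strings that contain at least one '11' pair.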
t = int(input())
for i in range(t):
w=int(input())
print(2**w-cal(w)) | [
"[email protected]"
] | |
e1267a54f015e66aaf57df060a0ebb302d36b67e | 8d4f26bccc3b016cf45e8270df617cea73d1a741 | /utils/transforms.py | 4a3b38f5fe3802f8719e83a981a8f9c1740e3a2c | [] | no_license | krylatov-pavel/aibolit-ECG | 3b6e4fc8d87ada6a615038c7fb94048570af2d43 | 27bad875981547ea93ac0088518eb29149078988 | refs/heads/master | 2022-12-26T05:49:30.827061 | 2019-08-19T10:47:20 | 2019-08-19T10:47:20 | 191,343,111 | 1 | 0 | null | 2022-12-08T05:56:08 | 2019-06-11T09:53:21 | Python | UTF-8 | Python | false | false | 631 | py | import torch
from torchvision import transforms
def squeeze(x):
return torch.squeeze(x, dim=0)
def clip_fn(min, max):
def clip(x):
x = torch.clamp(x, min, max)
return x
return clip
def scale_fn(min, max, a, b):
def scale(x):
x = ((b - a) * (x - min) / (max - min)) + a
return x
return scale
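# Affine map from [min, max] onto [a, b]; with the constants used in get_transform,
# clipped samples in [-19, 21] are rescaled to [0, 5].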
def get_transform():
clip = clip_fn(-19, 21)
scale = scale_fn(-19, 21, 0, 5)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(clip),
transforms.Lambda(scale),
transforms.Lambda(squeeze)
])
return transform | [
"[email protected]"
] | |
8abbdd180f33166add8aa0e2afc8656a3e61eb68 | 198dd2fd5b2aa27b950bd5844c97a1ebdbd3af17 | /dephell/repositories/_local.py | 90ceb7d1f9c1045a3bc1c9a026ebe0a9eea2cb71 | [
"MIT"
] | permissive | espdev/dephell | 68411b20c1830836dcea0eec96a8bd15e95171d5 | 17d5604e7b443b4d58bffc635a139adb49431efc | refs/heads/master | 2020-11-26T01:05:07.580285 | 2019-12-20T14:29:07 | 2019-12-20T14:29:07 | 228,915,765 | 0 | 0 | MIT | 2019-12-18T20:24:21 | 2019-12-18T20:24:20 | null | UTF-8 | Python | false | false | 4,172 | py | # built-in
from datetime import datetime
from pathlib import Path
from typing import Optional, Tuple, Union
# app
from ..cache import RequirementsCache
from ..config import Config
from ..constants import FILES
from ..models.release import Release
from ._warehouse import WarehouseLocalRepo
from .base import Interface
class LocalRepo(Interface):
def __init__(self, path: Union[Path, str]):
if type(path) is str:
path = Path(path)
self.path = path
def get_releases(self, dep) -> Tuple[Release, ...]:
releases = []
dist_path = (self.path / 'dist')
if dist_path.exists():
repo = WarehouseLocalRepo(name='tmp', path=dist_path)
releases = list(repo.get_releases(dep=dep))
root = self.get_root(name=dep.name, version='0.0.0')
self.update_dep_from_root(dep=dep, root=root)
releases.append(Release(
raw_name=root.raw_name,
version=root.version,
time=datetime.fromtimestamp(self.path.stat().st_mtime),
))
return tuple(reversed(releases))
async def get_dependencies(self, name: str, version: str, extra: Optional[str] = None) -> tuple:
cache = RequirementsCache('local', 'deps', name, str(version))
deps = cache.load()
if deps:
return deps
root = self.get_root(name=name, version=version)
deps = root.dependencies
if extra:
deps = tuple(dep for dep in deps if extra in dep.envs)
cache.dump(root=root)
return deps
def get_root(self, name: str, version: str):
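# resolution order: explicit file -> wheel/sdist found in the tree
# -> *.egg-info -> pyproject.toml (dephell config) -> known dependency
# files listed in FILES (requirements.txt and friends)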
from ..converters import EggInfoConverter, SDistConverter, WheelConverter, CONVERTERS
if not self.path.exists():
raise FileNotFoundError(str(self.path))
# load from file
if self.path.is_file():
for converter in CONVERTERS.values():
if converter.can_parse(path=self.path):
return converter.load(path=self.path)
raise LookupError('cannot find loader for file ' + str(self.path))
# get from wheel or sdist
patterns = (
('-*-*-*.whl', WheelConverter()),
('.tar.gz', SDistConverter()),
('.tgz', SDistConverter()),
)
for suffix, converter in patterns:
paths = tuple(self.path.glob('**/{name}-{version}{suffix}'.format(
name=name.replace('-', '_'),
version=str(version),
suffix=suffix,
)))
if paths:
path = min(paths, key=lambda path: len(path.parts))
return converter.load(path=path)
# read from egg-info
path = self.path / (name + '.egg-info')
if path.exists():
return EggInfoConverter().load(path=path)
# read from dephell config
path = self.path / 'pyproject.toml'
if path.exists():
config = Config().attach_file(path=path, env='main')
if config is not None:
section = config.get('to') or config.get('from')
if section and 'path' in section and 'format' in section:
converter = CONVERTERS[section['format']]
path = self.path.joinpath(section['path'])
return converter.load(path)
# get from dependencies file
for fname in FILES:
path = self.path / fname
if not path.exists():
continue
for converter in CONVERTERS.values():
if converter.can_parse(path=path):
return converter.load(path=path)
raise LookupError('cannot find dependencies in ' + str(self.path))
@staticmethod
def update_dep_from_root(dep, root) -> None:
if not dep.description:
dep.description = root.description
if not dep.authors:
dep.authors = root.authors
if not dep.links:
dep.links = root.links
if not dep.classifiers:
dep.classifiers = root.classifiers
if not dep.license:
dep.license = root.license
| [
"[email protected]"
] | |
635ba9cef3c47552319481c624406b556a3d4b17 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=0/params.py | aff20aa8732740d3ec888ec12ac9538f67a70364 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.570619',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 0,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
81ead41af15f2e458481b49604b4fc00b30f8ecc | 9cbd22ce203ab7f40d6e02a7ee2b565461369b45 | /bagbankde/items.py | 768a82202c55a20a6a785aed852583aebad99500 | [] | no_license | hristo-grudev/bagbankde | a506ed6af28db7ad4c609d7fbd922d5a699b64d6 | 1afcb0454b9e498c4b4eccb233b7d2aa15823513 | refs/heads/main | 2023-03-26T14:09:08.641400 | 2021-03-18T14:16:31 | 2021-03-18T14:16:31 | 349,100,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import scrapy
class BagbankdeItem(scrapy.Item):
title = scrapy.Field()
description = scrapy.Field()
date = scrapy.Field()
| [
"[email protected]"
] | |
7d42995cc032265dc1da6c26ba81455cc32bcebd | c60c199410289c1d7ec4aea00833b461e1f08f88 | /.history/older-than/older/source-example/day2/user-list.py | a923e529b041db39bfa93f7bc43cb926236f86e4 | [] | no_license | ver007/pythonjumpstart | 66fb111e6af197fad3e853b2c2d712a1b57a7d59 | 5b1f52479abd07456e2da494149e491d398f3b7d | refs/heads/master | 2021-01-21T01:34:35.501870 | 2015-05-13T14:10:13 | 2015-05-13T14:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | #!/usr/bin/env python
users = [ line.split(':')[0] for line in open('/etc/passwd') if '#' not in line and '!' in line ]
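# the split above keeps the first colon-separated field (the username);
# the '!' test presumably keeps only entries whose password field marks
# them as locked on this particular system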
users.sort()
for (i, n) in enumerate(users):
print i, ":", n
| [
"[email protected]"
] | |
0933da67fd790e5811ce8b580f16a0ab1a3f6a75 | 32bbbd6dbd100bbb9a2282f69ac3b7b34516347f | /Study/keras/keras44_cifar100_2_cnn.py | 88e3f8742ac013b4f6a6c64966e550971666ddae | [] | no_license | kimjh1753/AIA_Academy_Study | 2162d4d4f1a6b8ca1870f86d540df45a8742f359 | 6022718ae7f9e5170a19c4786d096c8042894ead | refs/heads/master | 2023-05-07T12:29:12.920693 | 2021-06-05T01:09:33 | 2021-06-05T01:09:33 | 324,136,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | # 1. Data
from tensorflow.keras.datasets import cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
print(x_train.shape, y_train.shape) # (50000, 32, 32, 3) (50000, 1)
print(x_test.shape, y_test.shape) # (10000, 32, 32, 3) (10000, 1)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3])/255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3])/255.
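# the /255. above scales the uint8 pixel values to [0, 1]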
# OneHotEncoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape, y_test.shape) # (50000, 100) (10000, 100)
# 2. Model construction
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout
model = Sequential()
model.add(Conv2D(filters=10, kernel_size=(2,2), padding='same',
strides=1, input_shape=(32, 32, 3)))
model.add(Dropout(0.2))
model.add(Conv2D(9, (2,2), padding='valid'))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1000, activation='relu'))
model.add(Dense(100, activation='softmax'))
# 3. Compile and train
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='loss', patience=30, mode='auto')
model.fit(x_train, y_train, epochs=2000, batch_size=2000, validation_split=0.2, verbose=1, callbacks=[es])
# 4. Evaluate and predict
loss, acc = model.evaluate(x_test, y_test, batch_size=1)
print("loss : ", loss)
print("loss : ", acc)
# keras cifar100 cnn
# loss : 5.992544174194336
# acc : 0.23280000686645508 | [
"[email protected]"
] | |
6c043811b2da3f373efa06bc8156705996b15ee9 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_never.py | d9002c9003bf7b8c0007df237bda667fddc3bf4d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py |
# class header
class _NEVER():
def __init__(self):
self.name = "NEVER"
self.definitions = [u'not at any time or not on any occasion: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
6a1e6c2874181f6c5859c830e394359834617163 | 747f759311d404af31c0f80029e88098193f6269 | /extra-addons/training_doc/__init__.py | 4cb47ad014a13cc816addb240e952f246358cbea | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jesús Martín <[email protected]>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import training_doc
import wizard
| [
"[email protected]"
] | |
239de711038d222c388248ee584d39770975bd23 | d53bc632503254ca0d5099fe457c02c07212a131 | /middleware1/testApp/middleware.py | acbf1c0c7c41b9e5d9f591e64615887737e2f158 | [] | no_license | srikar1993/django | ba8428f6e1162cc40f2d034126e7baf29eb62edc | 2199d5d94accc7bce5b3fac4a4b7b1444e39b35f | refs/heads/master | 2023-07-14T21:10:52.654992 | 2021-08-26T06:37:04 | 2021-08-26T06:37:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,950 | py | from django.http import HttpResponse
class ExecutionFlowMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Pre processing of request
print('This line is printed at pre-processing of request...')
# Forwarding the request to next level
response = self.get_response(request)
# Post processing of request
print('This line is printed at post-processing of request...')
return response
class AppMaintainanceMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
return HttpResponse('<h1>Application Under Maintenance...</h1><h1>Please Try Again After Some Time</h1>')
class ErrorMessageMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
return response
def process_exception(self, request, exception):
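# Django calls process_exception() whenever the view raises; returning an
# HttpResponse here replaces the default error page.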
mymsg = '''
<h1>Please check the input you have provided...</h1><hr>
'''
exceptioninfo = f'''
<h1>Raised Exception: {exception.__class__.__name__}</h1>
<h1>Exception Message: {exception}</h1>
'''
return HttpResponse(mymsg + exceptioninfo)
class FirstMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Pre processing of request
print('This line is printed by First Middleware at pre-processing of request...')
# Forwarding the request to next level
response = self.get_response(request)
# Post processing of request
print('This line is printed by First Middleware at post-processing of request...')
return response
class SecondMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Pre processing of request
print('This line is printed by Second Middleware at pre-processing of request...')
# Forwarding the request to next level
response = self.get_response(request)
# Post processing of request
print('This line is printed by Second Middleware at post-processing of request...')
return response
class ThirdMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Pre processing of request
print('This line is printed by Third Middleware at pre-processing of request...')
# Forwarding the request to next level
response = self.get_response(request)
# Post processing of request
print('This line is printed by Third Middleware at post-processing of request...')
return response
| [
"[email protected]"
] | |
6d13ea298a8c4814de41ef50e5ca2ebf16d19711 | c9b5a2cd00764ee4a0b889b5b602eb28fd08e989 | /python/238-Product of Array Except Self.py | 600d5102cd52a0453255a55f78ed445ca39932d5 | [] | no_license | cwza/leetcode | 39799a6730185fa06913e3beebebd3e7b2e5d31a | 72136e3487d239f5b37e2d6393e034262a6bf599 | refs/heads/master | 2023-04-05T16:19:08.243139 | 2021-04-22T04:46:45 | 2021-04-22T04:46:45 | 344,026,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from typing import List
class Solution:
# def productExceptSelf(self, nums: List[int]) -> List[int]:
# "Precompute L, R product, Time: O(n), Space: O(n)"
# n = len(nums)
# # L[i]: product of nums[0, i-1]
# L = [1]*n
# for i in range(1, n):
# L[i] = L[i-1] * nums[i-1]
# # R[i]: product of nums[i+1, n]
# R = [1]*n
# for i in reversed(range(n-1)):
# R[i] = R[i+1] * nums[i+1]
# # print(L, R)
# result = [1]*n
# for i in range(n):
# result[i] = L[i] * R[i]
# return result
def productExceptSelf(self, nums: List[int]) -> List[int]:
"Use output list as R and Compute L on the fly, Time: O(n), Space: O(1) exclude space used by output"
n = len(nums)
R = [1]*n # this list is also used as the result list
for i in reversed(range(n-1)):
R[i] = R[i+1] * nums[i+1]
cur_L = 1
for i in range(n):
R[i] = cur_L * R[i]
cur_L = cur_L * nums[i]
return R
nums = [2,3,4,5]
result = Solution().productExceptSelf(nums)
assert result == [60,40,30,24]
nums = [2,3,0,5]
result = Solution().productExceptSelf(nums)
assert result == [0,0,30,0] | [
"[email protected]"
] | |
4c6c5d18a00823a83ef35c263e076351815ec55a | 98591a80b7881385dc15a7aee3298aed68efbc32 | /MODEL1302010025/model.py | 7776531980fb768cf4985182c7a6bdc908a3c3e7 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/MODEL1302010025 | 9f49612839a3c29dd8034bf17a58a6caa3e1a8eb | 852113c7356661180c266a701e56dc8bc789a898 | refs/heads/master | 2020-12-24T14:44:47.764710 | 2014-10-16T05:57:03 | 2014-10-16T05:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1302010025.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | [
"[email protected]"
] | |
9f281fc9d686425e97b54cdc34eb570c1fe19b42 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02709/s208022099.py | 7c3413420f8ed9fc0b8a40a4b007da745e363f1f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from sys import stdin
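# Apparently AtCoder's "Active Infants" problem (p02709): rearrange values to
# maximize the sum of a[k] * |original index - new index|. Process values in
# decreasing order; dp[i][j] = best total after placing the i + j largest
# values with i of them packed at the left end and j at the right end.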
def main():
n = int(stdin.readline())
a = list(map(int, stdin.readline().split()))
a2 = [[a[i], i] for i in range(n)]
a2.sort(reverse=True)
dp = [[0 for _ in range(n + 1)] for _ in range(n + 1)]
ans = 0
for i in range(n + 1):
for j in range(n + 1 - i):
s1 = s2 = 0
if i > 0:
s1 = dp[i - 1][j] + a2[i + j - 1][0] * (a2[i + j - 1][1] - (i - 1))
if j > 0:
s2 = dp[i][j - 1] + a2[i + j - 1][0] * ((n - j) - a2[i + j - 1][1])
dp[i][j] = max(s1, s2)
ans = max(ans, dp[i][n - i])
print(ans)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
40125cfd752c5de08af72a21e324f89b505db21d | 26c019f7dadceaf773cd292d7364582bc2a278d2 | /ABC38/prob_c.py | f33438f7f66d9238ccdeb20f211ec805df4b4225 | [] | no_license | steinstadt/AtCoder | 69f172280e89f4249e673cae9beab9428e2a4369 | cd6c7f577fcf0cb4c57ff184afdc163f7501acf5 | refs/heads/master | 2020-12-23T12:03:29.124134 | 2020-11-22T10:47:40 | 2020-11-22T10:47:40 | 237,144,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | # Problem C - Monotonic Increase
# input
N = int(input())
a_list = list(map(int, input().split()))
# initialization
ans = 0
# shakutori (two-pointer) count: a_list[left:right] always stays a strictly increasing run
right = 0
for left in range(N):
while (right<N and a_list[right-1]<a_list[right]) or left==right:
right += 1
ans += right - left
if left==right:
right += 1
# output
print(ans)
| [
"[email protected]"
] | |
12d4e303ec37dc162f5cd4b655c882bf2ae8429b | 0b77f11bfb68d465e99fdfcea8bef63013409df8 | /reports/views.py | e7046593d76822fccfcdfe0b0bab740325b0bb42 | [] | no_license | dbsiavichay/furb | dea1de7d3085bd41a668a6581a4997ff50a58afe | 36dea81c23d614bceaf35b38a5861a2ca095ea98 | refs/heads/master | 2020-06-28T06:05:42.313533 | 2019-03-14T15:37:20 | 2019-03-14T15:37:20 | 74,506,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,486 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView, TemplateView
from wildlife.models import Animal, Kind
from location.models import Parish
from django.conf import settings
from os.path import isfile, join
from io import BytesIO
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4, landscape
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import SimpleDocTemplate, Paragraph, TableStyle, Table, Image
from reportlab.lib.units import cm, mm
from wildlife.views import get_letterhead_page, NumberedCanvas
class ParishListView(ListView):
model = Parish
template_name = 'reports/animal_by_parish.html'
queryset = Parish.objects.filter(canton_code='1401')
class StatsListView(ListView):
model = Parish
template_name = 'reports/animal_stats.html'
queryset = Parish.objects.filter(canton_code='1401')
def get_context_data(self, **kwargs):
from datetime import date
context = super(StatsListView, self).get_context_data(**kwargs)
years = range(2017, date.today().year + 1)
months = [
(1, 'ENERO'),
(2, 'FEBRERO'),
(3, 'MARZO'),
(4, 'ABRIL'),
(5, 'MAYO'),
(6, 'JUNIO'),
(7, 'JULIO'),
(8, 'AGOSTO'),
(9, 'SEPTIEMBRE'),
(10, 'OCTUBRE'),
(11, 'NOVIEMBRE'),
(12, 'DICIEMBRE'),
]
context.update({
'months': months,
'years':years,
})
return context
def get_by_parish(request, parish):
# Create the HttpResponse object with the appropriate PDF headers.
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'inline; filename=reporte.pdf'
sterilized = request.GET.get('sterilized', False)
pdf = get_animal_by_parish_report(parish, sterilized)
response.write(pdf)
return response
def get_animal_stats(request, month, year):
# Create the HttpResponse object with the appropriate PDF headers.
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'inline; filename=reporte.pdf'
pdf = get_chart_by_month(month, year)
response.write(pdf)
return response
def get_animal_by_parish_report(parish, sterilized):
animals = Animal.objects.filter(parish=parish).order_by('owner')
if sterilized: animals = animals.filter(want_sterilize=True)
buff = BytesIO()
doc = SimpleDocTemplate(buff,pagesize=A4,rightMargin=60, leftMargin=40, topMargin=75, bottomMargin=50,)
styles = getSampleStyleSheet()
path = join(settings.BASE_DIR, 'static/assets/report/checked.png')
report = [
Paragraph("DIRECCIÓN DE GESTION AMBIENTAL Y SERVICIOS PÚBLICOS", styles['Title']),
Paragraph("REPORTE DE FAUNA POR PARROQUIA", styles['Title']),
]
tstyle = TableStyle([
('LINEBELOW',(0,0),(-1,-1),0.1, colors.gray),
('TOPPADDING',(0,0),(-1,-1), 5),
('BOTTOMPADDING',(0,0),(-1,-1), 0),
('LEFTPADDING',(0,0),(-1,-1), 0),
('RIGHTPADDING',(0,0),(-1,-1), 0),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
])
# tstyle = TableStyle([
# ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
# ('LEFTPADDING',(0,0),(-1,-1), 3),
# ('RIGHTPADDING',(0,0),(-1,-1), 3),
# ('BOTTOMPADDING',(0,0),(-1,-1), 0),
# ('BOX', (0, 0), (-1, -1), 0.5, colors.black),
# ('ALIGN',(0,0),(0,-1),'RIGHT'),
# ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
# ('BACKGROUND', (0, 0), (-1, 0), colors.gray)
# ])
headingStyle = styles['Heading5']
headingStyle.fontSize = 6
contentStyle = styles['BodyText']
contentStyle.fontSize = 5
columns_width = [0.5*cm, 1.4*cm, 2.5*cm,1.2*cm,1.8*cm,1.5*cm,4.5*cm,3*cm]
headings = ('N°', 'CÓDIGO', 'NOMBRE','ESPECIE', 'ESTERILIZAR?', 'UBICACIÓN', 'PROPIETARIO', 'CONTACTO')
headings = (Paragraph(h, headingStyle) for h in headings)
content = [(
Paragraph(str(index + 1), contentStyle),
Paragraph(animal.code, contentStyle),
Paragraph(animal.name.title(), contentStyle),
Paragraph(animal.breed.kind.name.title(), contentStyle),
Image(path, width=2.5*mm, height=2.5*mm) if animal.want_sterilize else Paragraph('', contentStyle),
Paragraph(animal.get_parish_name().title(), contentStyle),
Paragraph(animal.get_owner_name().title(), contentStyle),
Paragraph(animal.get_owner_contact().title(), contentStyle),
) for index, animal in enumerate(animals)] if len(animals) else [('Sin datos.',)]
table = Table([headings] + content, columns_width, style=tstyle, )
report.append(table)
doc.build(report,canvasmaker=NumberedCanvas,onFirstPage=get_letterhead_page,onLaterPages=get_letterhead_page)
return buff.getvalue()
def get_chart_by_month(month, year):
buff = BytesIO()
months = [
'ENERO','FEBRERO','MARZO','ABRIL',
'MAYO','JUNIO','JULIO','AGOSTO',
'SEPTIEMBRE','OCTUBRE','NOVIEMBRE','DICIEMBRE',
]
doc = SimpleDocTemplate(buff,pagesize=A4,rightMargin=60, leftMargin=40, topMargin=75, bottomMargin=50,)
styles = getSampleStyleSheet()
report = [
Paragraph("DIRECCIÓN DE GESTION AMBIENTAL Y SERVICIOS PÚBLICOS", styles['Title']),
Paragraph('REPORTE ESTADISTICO %s %s' % (months[int(month)-1],year), styles['Title']),
]
parishes = Parish.objects.filter(canton_code='1401')
kinds = Kind.objects.all()
for kind in kinds:
_animals = Animal.objects.filter(
breed__kind=kind,
date_joined__year = int(year),
date_joined__month = int(month)
)
data = []
labels = []
for parish in parishes:
animals = _animals.filter(parish=parish.code)
if len(animals) > 0:
percent = (len(animals) * 100.00) / len(_animals)
data.append(len(animals))
labels.append('%s (%0.2f%%)' % (parish.name.encode('utf-8'), percent))
if len(data) > 0:
report.append(Paragraph(kind.name, styles['Heading3']))
chart = create_pie_chart(data, labels, True)
report.append(chart)
doc.build(report,canvasmaker=NumberedCanvas,onFirstPage=get_letterhead_page,onLaterPages=get_letterhead_page)
return buff.getvalue()
colores = [
colors.HexColor('#7fffd4'),
colors.HexColor('#0000ff'),
colors.HexColor('#a52a2a'),
colors.HexColor('#ff7f50'),
colors.HexColor('#a9a9a9'),
colors.HexColor('#008b8b'),
colors.HexColor('#8b0000'),
colors.HexColor('#ff00ff'),
colors.HexColor('#00008b'),
colors.HexColor('#008000'),
colors.HexColor('#adff2f'),
colors.HexColor('#00ff00'),
colors.HexColor('#ff00ff'),
colors.HexColor('#ffa500'),
colors.HexColor('#ff0000'),
colors.HexColor('#ee82ee'),
colors.HexColor('#ffff00'),
]
def add_legend(draw_obj, chart, data):
from reportlab.graphics.charts.legends import Legend
from reportlab.lib.validators import Auto
legend = Legend()
legend.alignment = 'right'
legend.x = 90
legend.y = 50
legend.colorNamePairs = [(chart.slices[i].fillColor, (chart.labels[i].split('(')[0], '%s' % chart.data[i])) for i in range(0, len(data))]
draw_obj.add(legend)
def create_pie_chart(data, labels, legend=False):
from reportlab.graphics.charts.piecharts import Pie
from reportlab.graphics.shapes import Drawing
d = Drawing(250, 275)
pie = Pie()
# required by Auto
pie._seriesCount = len(data)
pie.x = 175
pie.y = 100
pie.width = 150
pie.height = 150
pie.data = data
pie.labels = labels
pie.simpleLabels = 0
pie.sideLabels = True
pie.slices.strokeWidth = 0.5
for i in range (0, len(colores)):
pie.slices[i].fillColor = colores[i]
if legend:
add_legend(d, pie, data)
d.add(pie)
#d.save(formats=['pdf'], outDir='.', fnRoot='test-pie')
return d
| [
"[email protected]"
] | |
53ef131a0b9babc5af8fa15c91c4fca6cc7de93c | 69c882c678103b182988fb60d3e898d569980f1c | /Day 4/day4prog4.py | 5f6224da0ba42f841f9b5541f0a3d0a63e87733b | [] | no_license | gittygupta/stcet-python | 44be9d91cdd6215879d9f04497214819228821be | e77456172746ee76b6e2a901ddb0c3dbe457f82a | refs/heads/master | 2022-03-05T11:37:08.720226 | 2019-12-01T00:56:03 | 2019-12-01T00:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | n=2
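# trial-division prime printer: flag stays 1 only when n has no divisor in [2, n)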
while n<1000:
flag=1
for i in range(2, n):
if n%i==0:
flag=0
if flag==1:
print(n)
n+=1
| [
"[email protected]"
] | |
3457d5a9fc1cb829b5810e28cb19b670b4a2c408 | 79f42fd0de70f0fea931af610faeca3205fd54d4 | /base_lib/ChartDirector/pythondemo_cgi/finance2.py | d21cccb185021c83f32e480286c061357e3302a6 | [
"IJG"
] | permissive | fanwen390922198/ceph_pressure_test | a900a6dc20473ae3ff1241188ed012d22de2eace | b6a5b6d324e935915090e791d9722d921f659b26 | refs/heads/main | 2021-08-27T16:26:57.500359 | 2021-06-02T05:18:39 | 2021-06-02T05:18:39 | 115,672,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | #!/usr/bin/python
from FinanceChart import *
# Create a finance chart demo containing 100 days of data
noOfDays = 100
# To compute moving averages starting from the first day, we need to get extra data points before
# the first day
extraDays = 30
# In this exammple, we use a random number generator utility to simulate the data. We set up the
# random table to create 6 cols x (noOfDays + extraDays) rows, using 9 as the seed.
rantable = RanTable(9, 6, noOfDays + extraDays)
# Set the 1st col to be the timeStamp, starting from Sep 4, 2002, with each row representing one
# day, and counting week days only (jump over Sat and Sun)
rantable.setDateCol(0, chartTime(2002, 9, 4), 86400, 1)
# Set the 2nd, 3rd, 4th and 5th columns to be high, low, open and close data. The open value starts
# from 100, and the daily change is random from -5 to 5.
rantable.setHLOCCols(1, 100, -5, 5)
# Set the 6th column as the vol data from 5 to 25 million
rantable.setCol(5, 50000000, 250000000)
# Now we read the data from the table into arrays
timeStamps = rantable.getCol(0)
highData = rantable.getCol(1)
lowData = rantable.getCol(2)
openData = rantable.getCol(3)
closeData = rantable.getCol(4)
volData = rantable.getCol(5)
# Create a FinanceChart object of width 640 pixels
c = FinanceChart(640)
# Add a title to the chart
c.addTitle("Finance Chart Demonstration")
# Set the data into the finance chart object
c.setData(timeStamps, highData, lowData, openData, closeData, volData, extraDays)
# Add a slow stochastic chart (75 pixels high) with %K = 14 and %D = 3
c.addSlowStochastic(75, 14, 3, 0x006060, 0x606000)
# Add the main chart with 240 pixels in height
c.addMainChart(240)
# Add a 10 period simple moving average to the main chart, using brown color
c.addSimpleMovingAvg(10, 0x663300)
# Add a 20 period simple moving average to the main chart, using purple color
c.addSimpleMovingAvg(20, 0x9900ff)
# Add candlestick symbols to the main chart, using green/red for up/down days
c.addCandleStick(0x00ff00, 0xff0000)
# Add 20 days donchian channel to the main chart, using light blue (9999ff) as the border and
# semi-transparent blue (c06666ff) as the fill color
c.addDonchianChannel(20, 0x9999ff, 0xc06666ff)
# Add a 75 pixels volume bars sub-chart to the bottom of the main chart, using green/red/grey for
# up/down/flat days
c.addVolBars(75, 0x99ff99, 0xff9999, 0x808080)
# Append a MACD(26, 12) indicator chart (75 pixels high) after the main chart, using 9 days for
# computing divergence.
c.addMACD(75, 26, 12, 9, 0x0000ff, 0xff00ff, 0x008000)
# Output the chart
print("Content-type: image/png\n")
binaryPrint(c.makeChart2(PNG))
| [
"[email protected]"
] | |
49982cbda6186d5804468863bfc7a8d00d46ef96 | cac155c4a39b902213fe9efe39dbe761afb00a40 | /回溯法/leetcode/排列问题/leetcode_46_permute.py | 068c120de650f947bde4374dd12e8327b69c7a1c | [] | no_license | songyingxin/python-algorithm | 51c8d2fc785ba5bc5c3c98a17dce33cbced8cb99 | 4b1bebb7d8eb22516119acc921dfc69a72420722 | refs/heads/master | 2022-06-29T05:04:14.300542 | 2022-05-22T10:11:34 | 2022-05-22T10:11:34 | 164,998,626 | 331 | 72 | null | null | null | null | UTF-8 | Python | false | false | 696 | py |
from typing import List

# permute(nums[0...n-1]) = (take one number out) + permute(the remaining numbers)
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
def backtracking(nums, item):
if not nums:
result.append(item)
return
for index in range(len(nums)):
tmp = nums[:index] + nums[index+1:]
tmp_item = item[:]
tmp_item.append(nums[index])
backtracking(tmp, tmp_item)
result = []
backtracking(nums, [])
return result
if __name__ == "__main__":
nums = [1,2,3]
print(Solution().permute(nums))
| [
"[email protected]"
] | |
6f83f7dc50cbc2028bf2c6b1e578b94c2a593cb0 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_12/ar_/test_artificial_1024_Quantization_Lag1Trend_12__100.py | 009dead988ececca3e907ac0f1dc2250b82392ff | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 275 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"[email protected]"
] | |
91435fe0101353d14673c598094ce8d75d7b6780 | 26c019f7dadceaf773cd292d7364582bc2a278d2 | /user_app/tests/interactors/raw_inputs.py | 0b9d1fcc1eb22362432905c8446daa399ba983be | [] | no_license | DilLip-Chowdary-Codes/Backend_Mini_Projects | 289d5213a1c62d5b2ab26397e0d684632b139ad1 | f69dc6e9de4d621b782b703f2aa41cd126d8a58b | refs/heads/master | 2022-11-12T02:09:36.600636 | 2020-07-09T15:05:21 | 2020-07-09T15:05:21 | 272,417,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,638 | py | from user_app.dtos\
import TaskDetailsDto, ProjectDto,\
StateDto, UserDto, ProjectDetailsDto,\
TaskDto, TransitionDetailsDto, ChecklistDetailsDto,\
UpdateTransitionInputDto, ChecklistStatusDto
project_data = {
"name": "projectManagement",
"description": "it's a blaw blaw blaw blaw blaw blaw ",
"workflow_id": 1,
"project_type": "Classic Software",
"developers": [1]
}
task_data = {
"project_id": 1,
"issue_type": "Enhancement",
"title": "Optimizing DB",
"description": "string",
"state_id": 1
}
user_dto = UserDto(
user_id=1,
username="username_1",
profile_pic="http://www.google.com",
phone_no="8739835635",
is_admin=True
)
developer_dto = UserDto(
user_id=2,
username="username_2",
profile_pic="http://www.google.com",
phone_no="8739835635",
is_admin=False
)
state_dto = StateDto(
name="In Progress")
state_2_dto = StateDto(
name="New State")
project_dto = ProjectDto(
name="projectManagement",
description="it's a blaw blaw blaw blaw blaw blaw ",
workflow_id=1,
project_type="Classic Software",
developers=[1]
)
project_details_dto = ProjectDetailsDto(
project_id=1,
name="projectManagement",
description="it's a blaw blaw blaw blaw blaw blaw ",
workflow="",
project_type="Classic Software",
created_by=user_dto,
created_at="2020-05-28 10:06:23",
developers=[developer_dto]
)
task_dto = TaskDto(
project_id=1,
issue_type="Enhancement",
title="Optimizing DB",
description="string",
state_id=1)
task_details_dto = TaskDetailsDto(
task_id=1,
project=project_details_dto,
issue_type="Enhancement",
title="Optimizing DB",
assignee=user_dto,
description="string",
state=state_dto
)
tasks_dtos = [task_dto]
tasks_details_dtos = [task_details_dto]
#task_transition
checklist_input_dict= {
"checklist_id": 1,
"is_checked": True
}
checklist_input_dict_2 = {
"checklist_id": 2,
"is_checked": False
}
checklist_input_dicts_list = [
checklist_input_dict,
checklist_input_dict_2]
checklist_input_dict_unsatisfied_1 = {
"checklist_id": 1,
"is_checked": False
}
checklist_input_dicts_list_unsatisfied_mandatory_fields = [
checklist_input_dict_unsatisfied_1,
checklist_input_dict_2
]
checklist_status_dto = ChecklistStatusDto(
checklist_id=checklist_input_dict['checklist_id'],
is_checked=checklist_input_dict['is_checked'])
checklist_status_dto_2 = ChecklistStatusDto(
checklist_id=checklist_input_dict_2['checklist_id'],
is_checked=checklist_input_dict_2['is_checked'])
checklist_status_dtos_list = [
checklist_status_dto, checklist_status_dto_2
]
checklist_status_dto_unsatisfied = ChecklistStatusDto(
checklist_id=checklist_input_dict_unsatisfied_1['checklist_id'],
is_checked=checklist_input_dict_unsatisfied_1['is_checked'])
checklist_status_dtos_list_unsatisfied_mandatory_fields = [
checklist_status_dto_unsatisfied,
checklist_status_dto_2
]
update_task_state_input_data = {
"user_id": 1,
"project_id": 1,
"task_id": 1,
"from_state_id": 1,
"to_state_id": 2,
"checklist": checklist_input_dicts_list
}
update_task_state_input_data_with_unchecked_mandatory_checklist = {
"user_id": 1,
"project_id": 1,
"task_id": 1,
"from_state_id": 1,
"to_state_id": 2,
"checklist": checklist_input_dicts_list_unsatisfied_mandatory_fields
}
transition_details_query_dict = {
"project_id":1,
"task_id":1,
"to_state_id":2
}
task_state_data = {
"user_id": 1,
"project_id": 1,
"task_id": 1
}
from_state_id = task_dto.state_id
update_task_state_query_dto = UpdateTransitionInputDto(
project_id=update_task_state_input_data['project_id'],
task_id=update_task_state_input_data['task_id'],
from_state_id=from_state_id,
to_state_id=update_task_state_input_data['to_state_id'],
checklist=checklist_status_dtos_list)
update_task_state_query_dto_with_unchecked_mandatory_checklist\
= UpdateTransitionInputDto(
project_id=\
update_task_state_input_data_with_unchecked_mandatory_checklist[
'project_id'],
task_id=\
update_task_state_input_data_with_unchecked_mandatory_checklist[
'task_id'],
from_state_id=from_state_id,
to_state_id=\
update_task_state_input_data_with_unchecked_mandatory_checklist[
'to_state_id'],
checklist=checklist_status_dtos_list_unsatisfied_mandatory_fields
)
| [
"[email protected]"
] | |
8bde39144d8acee2bd36c7ff65890ffec18fda58 | f5f781ef988d4fa2868c923597a132018eb14041 | /build/ROBOTIS-OP3-msgs/op3_offset_tuner_msgs/cmake/op3_offset_tuner_msgs-genmsg-context.py | 9e14c02a4f494952599bc5018c54c563cbb5ddc4 | [] | no_license | greenechang/christmann_ws_2019fira | 701374a30059ee63faf62cfc8dae8ea783f6c078 | a1ba2846fe1326e54366627d8812fa1bf90c70e1 | refs/heads/master | 2022-11-15T20:55:15.891128 | 2020-07-15T09:52:17 | 2020-07-15T09:52:17 | 279,816,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/robotis/christmann_ws/src/ROBOTIS-OP3-msgs/op3_offset_tuner_msgs/msg/JointOffsetData.msg;/home/robotis/christmann_ws/src/ROBOTIS-OP3-msgs/op3_offset_tuner_msgs/msg/JointOffsetPositionData.msg;/home/robotis/christmann_ws/src/ROBOTIS-OP3-msgs/op3_offset_tuner_msgs/msg/JointTorqueOnOff.msg;/home/robotis/christmann_ws/src/ROBOTIS-OP3-msgs/op3_offset_tuner_msgs/msg/JointTorqueOnOffArray.msg"
services_str = "/home/robotis/christmann_ws/src/ROBOTIS-OP3-msgs/op3_offset_tuner_msgs/srv/GetPresentJointOffsetData.srv"
pkg_name = "op3_offset_tuner_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "op3_offset_tuner_msgs;/home/robotis/christmann_ws/src/ROBOTIS-OP3-msgs/op3_offset_tuner_msgs/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"[email protected]"
] | |
92476a49a95a1903850e0fff124bebc181d4136e | 32f61223ae8818f64922d69dc8d279428cd568e3 | /AlphaTwirl/AlphaTwirl/Loop/NullCollector.py | 212e63c87a8b257d129830e32131f74f42176eca | [
"BSD-3-Clause"
] | permissive | eshwen/cutflowirl | 959fdead7cc1f58e77e68074a9ee491c3259c6d6 | e20372dc3ce276c1db4e684b8e9f1e719b9e8e7d | refs/heads/master | 2020-04-05T11:20:49.000726 | 2017-11-23T16:15:10 | 2017-11-23T16:15:10 | 81,349,000 | 0 | 0 | null | 2017-11-23T16:15:11 | 2017-02-08T16:15:59 | Python | UTF-8 | Python | false | false | 382 | py | # Tai Sakuma <[email protected]>
##__________________________________________________________________||
class NullCollector(object):
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
def addReader(self, datasetName, reader):
pass
def collect(self):
pass
##__________________________________________________________________||
| [
"[email protected]"
] | |
ddb052d3917074619f88e5c250e223b616556c1b | 906c6abf6721303449a86c842a97193e86f1e88a | /sm/backup/NTCIR-Evaluation/src/GenerateXml.py | 30fa4354df107cf7417f11691a505ef644d9dd60 | [] | no_license | luochengleo/thuirwork | a5b5bedaa59dd94fde6c58d6c2ddba75fb99d374 | 2bf230949757401c15dee50249a0fa8aded595ad | refs/heads/master | 2020-04-13T12:49:03.752647 | 2014-08-31T08:37:52 | 2014-08-31T08:37:52 | 22,720,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | #coding=utf8
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
from collections import defaultdict
import sys,csv
import codecs
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding("utf8")
def loadcsv(filename):
return csv.reader(open(filename))
id2topic = dict()
for l in open('../data/temp/IMine.Query.txt').readlines():
id,topic = l.replace(codecs.BOM_UTF8,'').strip().split('\t')
id2topic[id] = topic
print id,topic
def evaid():
rtr = []
for i in range(1,34,1):
if i <10:
rtr.append('000'+str(i))
else:
rtr.append('00'+str(i))
return rtr
sls2id = dict()
for l in open('../data/temp/slsposs.txt').readlines():
segs = l.strip().split('\t')
sls2id[segs[1]] = segs[0]
id2fls2sls2queries = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda:set())))
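# nesting: topic id -> first-level subtopic -> second-level subtopic -> set of example queries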
for l in loadcsv('../data/csv/task6.csv'):
query = l[1]
fls = l[3]
sls = l[4]
if sls in sls2id:
if query != '' and fls != '' and sls != '':
id = sls2id[sls]
id2fls2sls2queries[id][fls][sls].add(query)
id2fls = defaultdict(lambda:list())
id2flsposs = defaultdict(lambda:dict())
for l in open('../data/temp/flsposs.txt'):
segs = l.strip().split('\t')
id = segs[0]
fls = segs[1]
poss = float(segs[2])
id2fls[id].append(fls)
id2flsposs[id][fls] = poss
id2sls = defaultdict(lambda:list())
id2slsposs = defaultdict(lambda:dict())
for l in open('../data/temp/slsposs.txt'):
segs = l.strip().split('\t')
id = segs[0]
sls = segs[1]
poss = float(segs[2])
id2sls[id].append(sls)
id2slsposs[id][sls] = poss
root = ET.Element('root')
for id in evaid():
topic = id2topic[id]
topicnode = ET.Element('topic',{'id':id,'content':topic})
for fls in id2fls[id]:
print id,fls
flsnode = ET.Element('fls',{'content':fls,'poss':str(id2flsposs[id][fls])})
for sls in id2sls[id]:
if sls in id2fls2sls2queries[id][fls]:
slsnode = ET.Element('sls',{'content':sls,'poss':str(id2slsposs[id][sls])})
for q in id2fls2sls2queries[id][fls][sls]:
expnode = ET.Element('example')
expnode.text = q
slsnode.append(expnode)
flsnode.append(slsnode)
topicnode.append(flsnode)
root.append(topicnode)
tree = ET.ElementTree(root)
tree.write('../data/test.xml','utf8')
| [
"[email protected]"
] | |
2a8a2fea5ef6b27e5ad95edd93fb19dddb4b601a | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005222.py | 554f9ec93f4279524a546fb6081ae0f1d20c7b74 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,004 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher68381(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.2.2.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i2.4.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher68381._instance is None:
CommutativeMatcher68381._instance = CommutativeMatcher68381()
return CommutativeMatcher68381._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 68380
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 68382
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 68383
if len(subjects) == 0:
pass
# 0: x*d
yield 0, subst2
subjects.appendleft(tmp2)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.4.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 68600
if len(subjects) >= 1:
tmp5 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.4.1.0', tmp5)
except ValueError:
pass
else:
pass
# State 68601
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst2
subjects.appendleft(tmp5)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp7 = subjects.popleft()
associative1 = tmp7
associative_type1 = type(tmp7)
subjects8 = deque(tmp7._args)
matcher = CommutativeMatcher68385.get()
tmp9 = subjects8
subjects8 = []
for s in tmp9:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp9, subst0):
pass
if pattern_index == 0:
pass
# State 68386
if len(subjects) == 0:
pass
# 0: x*d
yield 0, subst1
if pattern_index == 1:
pass
# State 68602
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst1
subjects.appendleft(tmp7)
return
yield
from .generated_part005223 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"[email protected]"
] | |
e13db41af418a2896e05121c9e2d591d24eaa882 | 9b6b3f4b30e9bd8a821d8df16bd71e62b9c6eb98 | /day2/data_structs/conversion_4.py | eedfdf0af2d6d721cb076e44d17b34e8eb93b27a | [] | no_license | shobhit-nigam/snape_mar | b7f2155cfcd83482230c339fe45f9ea851061318 | b7b33a767cc00d35a22e40c940b4331e4898c8d5 | refs/heads/main | 2023-03-25T05:44:21.244078 | 2021-03-26T05:27:28 | 2021-03-26T05:27:28 | 350,555,721 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | # sorted
avengers = {'captain':'shield', 'ironman':'suit', 'hulk':['smash', 'science'], 'black widow':'energy'}
xmen = ['mystique', 'magneto', 'wolverine']
dc = ('wonder woman', 'batman', 'flash')
stra = "hello"
print(list(stra))
print(sorted(stra))
print(sorted(dc))
print(sorted(xmen))
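# note: sorted() on a dict looks only at its keys, so the next line prints the sorted key list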
print(sorted(avengers))
| [
"[email protected]"
] | |
f20916f8c9c13c2a31dbcb18a07523b3185ae3d5 | aca8fc8c2a2de84e94f120e9ca8b12d152bc7cfa | /tests/test_fields_email.py | b9787d20611a2eea1172b40efdcc788bb790da58 | [] | no_license | habibutsu/yadm | de30b364edd40917b2b25457f76cec908f2ffd3d | b3b9f2fdd5987c718b9db600fd7881630bfef944 | refs/heads/master | 2022-12-14T22:14:57.190430 | 2019-03-20T15:52:13 | 2019-04-04T15:52:29 | 296,621,139 | 0 | 0 | null | 2020-09-18T12:55:49 | 2020-09-18T12:55:48 | null | UTF-8 | Python | false | false | 417 | py | import pytest
from yadm.documents import Document
from yadm.fields.email import EmailField, InvalidEmail
class Doc(Document):
e = EmailField()
def test_ok():
doc = Doc()
doc.e = '[email protected]'
assert doc.e == '[email protected]'
@pytest.mark.parametrize('bad_email', ['EmA.iL', 'E@mA@iL', 'EmAiL@'])
def test_error(bad_email):
doc = Doc()
with pytest.raises(InvalidEmail):
doc.e = bad_email
| [
"[email protected]"
] | |
c8e26e30e21138ec04c30db6579b6bd98a620898 | 55de20ff6a7b3e07cffae42d2d9b24178f65daf3 | /dockerhub_show_tags.py | 8c0568a7f12d0b05a91faf4c381b10a296ff8bb3 | [] | no_license | srjayep/pytools | 35f803f1adcc1e93f489475ee12c72ec10161649 | c96b752c7d8679e7dde1657914fa56bd9b4f2cfd | refs/heads/master | 2020-05-29T08:51:34.003012 | 2016-10-05T15:09:05 | 2016-10-05T15:09:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,117 | py | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2016-05-10 11:26:49 +0100 (Tue, 10 May 2016)
#
# https://github.com/harisekhon/pytools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help improve this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Tool to show Docker tags for one or more DockerHub repos
Written for convenience as Docker CLI doesn't currently support this:
See https://github.com/docker/docker/issues/17238
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import json
import logging
import os
import sys
import traceback
import urllib
try:
import requests
except ImportError:
print(traceback.format_exc(), end='')
sys.exit(4)
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, die, prog, isJson, jsonpp
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.3'
class DockerHubTags(CLI):
def __init__(self):
# Python 2.x
super(DockerHubTags, self).__init__()
# Python 3.x
# super().__init__()
self._CLI__parser.usage = '{0} [options] repo1 repo2 ...'.format(prog)
self.quiet = False
self.timeout_default = 30
def add_options(self):
self.add_opt('-q', '--quiet', action='store_true', default=False,
help='Output only the tags, one per line (useful for shell tricks)')
def run(self):
if not self.args:
self.usage('no repos given as args')
self.quiet = self.get_opt('quiet')
if not self.quiet:
print('\nDockerHub\n')
for arg in self.args:
self.print_tags(arg)
def print_tags(self, repo):
if not self.quiet:
print('repo: {0}'.format(repo))
print('tags: ', end='')
sys.stdout.flush()
indent = ' '
if self.quiet:
indent = ''
print('\n{0}'.format(indent).join(self.get_tags(repo)))
if not self.quiet:
print()
@staticmethod
def get_tags(repo):
namespace = 'library'
if '/' in repo:
(namespace, repo) = repo.split('/', 2)
url = 'https://registry.hub.docker.com/v2/repositories/{0}/{1}/tags/'\
.format(urllib.quote_plus(namespace), urllib.quote_plus(repo))
log.debug('GET %s' % url)
try:
verify = True
# workaround for Travis CI and older pythons - we're not exchanging secret data so this is ok
#if os.getenv('TRAVIS'):
# verify = False
req = requests.get(url, verify=verify)
except requests.exceptions.RequestException as _:
die(_)
log.debug("response: %s %s", req.status_code, req.reason)
log.debug("content:\n%s\n%s\n%s", '='*80, req.content.strip(), '='*80)
if req.status_code != 200:
die("%s %s" % (req.status_code, req.reason))
if not isJson(req.content):
die('invalid non-JSON response from DockerHub!')
if log.isEnabledFor(logging.DEBUG):
print(jsonpp(req.content))
print('='*80)
tag_list = []
try:
j = json.loads(req.content)
tag_list = [_['name'] for _ in j['results']]
except KeyError as _:
die('failed to parse output from DockerHub (format may have changed?): {0}'.format(_))
tag_list.sort()
# put latest to the top of the list
try:
tag_list.insert(0, tag_list.pop(tag_list.index('latest')))
except ValueError:
pass
return tag_list
if __name__ == '__main__':
DockerHubTags().main()
| [
"[email protected]"
] | |
779e7b1fc2bfe837f10a8070b3600f71ae8cdf3a | ece7ba486d29d4bc3e87c2046db2c31140e2d86a | /suitcase/mongo_normalized/tests/tests.py | 75f4046965a77899a78b88195844aeadf0dfc188 | [] | no_license | ke-zhang-rd/suitcase-mongo | 31b97bb13b9e6089248f888a6c33824b835de141 | c938bae589ab2fba301814c846c5d5339eb90fb8 | refs/heads/master | 2020-05-31T10:29:15.458932 | 2019-10-18T17:33:03 | 2019-10-18T17:33:03 | 190,241,607 | 0 | 0 | null | 2019-06-04T16:38:12 | 2019-06-04T16:38:11 | null | UTF-8 | Python | false | false | 451 | py | # Tests should generate (and then clean up) any files they need for testing. No
# binary files should be included in the repository.
from suitcase.mongo_normalized import Serializer
def test_export(db_factory, example_data):
documents = example_data()
metadatastore_db = db_factory()
asset_registry_db = db_factory()
serializer = Serializer(metadatastore_db, asset_registry_db)
for item in documents:
serializer(*item)
| [
"[email protected]"
] | |
39ac82b5900d8bff567825bc91b455a0be5074b1 | 4a7804ee05485c345b4e3c39a0c96ed4012542ac | /system/base/less/actions.py | fd506df396c789af3c5b6f73302212caf721a16d | [] | no_license | Erick-Pardus/Pardus | 1fef143c117c62a40e3779c3d09f5fd49b5a6f5c | 2693e89d53304a216a8822978e13f646dce9b1d3 | refs/heads/master | 2020-12-31T02:49:33.189799 | 2013-03-17T06:29:33 | 2013-03-17T06:29:33 | 17,247,989 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.autoreconf("-vfi")
autotools.configure()
def build():
autotools.make()
def install():
autotools.rawInstall('DESTDIR="%s"' % get.installDIR())
#pisitools.dobin("less")
#pisitools.dobin("lessecho")
#pisitools.dobin("lesskey")
#pisitools.newman("lesskey.nro", "lesskey.1")
#pisitools.newman("less.nro", "less.1")
pisitools.dodoc("NEWS", "README", "COPYING")
| [
"[email protected]"
] | |
eda2d7a7d548e568bc5fb77caddeb16bfc3b87a0 | 861c248aab85784542fab84eeccedda6c90682d9 | /msgtracker/apps/collector.py | 57d9013ce26b082eb333ef71a105496cc2632ede | [
"MIT"
] | permissive | daleysoftware/msg-tracker | c91cd67e7466c04574c2ed5256a2a0f931dd8647 | 16edb9d555795d0eec625dd954e14f914cbbbe2b | refs/heads/master | 2022-05-16T02:58:44.083469 | 2017-03-01T23:43:24 | 2017-03-01T23:43:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | import sched
import time
import sys
import logging
import msgtracker
import datetime
import signal
scheduler = sched.scheduler(time.time, time.sleep)
def _collect_and_log_forever(slack_client):
"""
Collect data from slack API and log in redis. Backend handles logging format. Run forever.
"""
wait_minutes = msgtracker.constants.QUERY_INTERVAL_MINUTES
try:
logging.info("Collect and log sequence queued.")
sample_time = datetime.datetime.now()
logging.debug("Sample time for this collection round: %s" % sample_time.strftime('%s'))
for user in slack_client.get_active_users():
msgtracker.backend.log_active(user, sample_time)
except IOError as e:
wait_minutes = 1
logging.error("IO error during collection round, retry soon. Error: %s" % e)
# And enter on the scheduler to keep things rolling.
logging.info("Wait %s minutes." % wait_minutes)
scheduler.enter(wait_minutes * 60, 1, _collect_and_log_forever, argument=(slack_client,))
def signal_handler(signum, frame):
print() # Cosmetics.
logging.error("Received signal. Abort.")
sys.exit(1)
def main(slack_client):
"""
Main program. Kick off scheduler and run forever.
"""
signal.signal(signal.SIGINT, signal_handler)
scheduler.enter(0, 1, _collect_and_log_forever, argument=(slack_client,))
scheduler.run()
if __name__ == '__main__':
msgtracker.helper.logging.init()
logging.info("Starting collector service.")
main(msgtracker.endpoints.slack.Slack())
| [
"[email protected]"
] |