blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cacff14e0b1b2678353ba2d462a5de00a04555a7 | 55ab4d0aecc49078e7a0f47a05457c9602327ed7 | /egs/madcat_arabic/v1/local/create_mask_from_page_image.py | b4147dcd3851a52f5a1a9319a6986519f66ac00b | [
"Apache-2.0"
] | permissive | aarora8/waldo | 56a171f0b2048d980023173ab38f5248db936eeb | ad08a05fa9e9890ad986f11d4bca3c773b228d87 | refs/heads/master | 2020-03-14T04:43:47.513263 | 2018-06-07T05:09:47 | 2018-06-07T05:09:47 | 131,447,076 | 0 | 0 | Apache-2.0 | 2018-04-28T22:00:19 | 2018-04-28T22:00:19 | null | UTF-8 | Python | false | false | 5,535 | py | #!/usr/bin/env python3
# Copyright 2018 Johns Hopkins University (author: Ashish Arora)
# Apache 2.0
""" This module will be used for creating text localization mask on page image.
Given the word segmentation (bounding box around a word) for every word, it will
extract line segmentation. To extract line segmentation, it will take word bounding
boxes of a line as input, will create a minimum area bounding box that will contain
all corner points of word bounding boxes. The obtained bounding box (will not necessarily
be vertically or horizontally aligned).
"""
import xml.dom.minidom as minidom
from waldo.data_manipulation import *
from waldo.core_config import CoreConfig
from waldo.mar_utils import compute_hull
from scipy.spatial import ConvexHull
from waldo.data_transformation import scale_down_image_with_objects, \
make_square_image_with_padding
def get_mask_from_page_image(madcat_file_path, image_file_name, max_size):
    """ Given a page image, extracts the page image mask from it.
    Input
    -----
    image_file_name (string): complete path and name of the page image.
    madcat_file_path (string): complete path and name of the madcat xml file
        corresponding to the page image.
    max_size: maximum output image size passed to the scaling step.
    """
    # Line-level polygons (minimum-area rectangles) parsed from the MADCAT xml.
    objects = _get_bounding_box(madcat_file_path)
    img = Image.open(image_file_name).convert("RGB")
    im_arr = np.array(img)
    config = CoreConfig()
    config.num_colors = 3
    image_with_objects = {
        'img': im_arr,
        'objects': objects
    }
    im_height = im_arr.shape[0]
    im_width = im_arr.shape[1]
    # Clip every polygon corner into the image rectangle; MAR corners can
    # fall slightly outside the page.
    validated_objects = []
    for original_object in image_with_objects['objects']:
        ordered_polygon_points = original_object['polygon']
        object = {}
        resized_pp = []
        for point in ordered_polygon_points:
            new_point = _validate_and_update_point(point, im_width, im_height)
            resized_pp.append(new_point)
        object['polygon'] = resized_pp
        validated_objects.append(object)
    validated_image_with_objects = {
        'img': im_arr,
        'objects': validated_objects
    }
    # Scale down to max_size, pad to a square canvas (3 channels, white),
    # then rasterize the polygons into the segmentation mask.
    scaled_image_with_objects = scale_down_image_with_objects(validated_image_with_objects, config,
                                                              max_size)
    img_padded = make_square_image_with_padding(scaled_image_with_objects['img'], 3, 255)
    padded_image_with_objects = {
        'img': img_padded,
        'objects': scaled_image_with_objects['objects']
    }
    y = convert_to_mask(padded_image_with_objects, config)
    return y
def _get_bounding_box(madcat_file_path):
    """Parse a MADCAT xml file and return one polygon per text line.

    Every <zone> element is a line; its word-level <token-image> elements
    carry <point> corners.  All corners of a line are fed to
    get_minimum_bounding_box, and the resulting rectangle's corners are
    re-ordered along the convex hull so the polygon is consistently wound.

    Returns a list of dicts of the form {'polygon': [(x, y), ...]}.
    """
    zones = minidom.parse(madcat_file_path).getElementsByTagName('zone')
    line_objects = []
    for zone_node in zones:
        # Gather every word-box corner belonging to this line.
        corner_points = [
            (int(pt.getAttribute('x')), int(pt.getAttribute('y')))
            for word_node in zone_node.getElementsByTagName('token-image')
            for pt in word_node.getElementsByTagName('point')
        ]
        mar_corners = tuple(get_minimum_bounding_box(corner_points))
        hull_order = [mar_corners[idx] for idx in ConvexHull(mar_corners).vertices]
        line_objects.append({'polygon': hull_order})
    return line_objects
def _validate_and_update_point(pt0, im_width, im_height, pt1=(0, 0)):
new_point = pt0
if pt0[0] < 0:
new_point = _get_pointx_inside_origin(pt0, pt1)
if pt0[0] > im_width:
new_point = _get_pointx_inside_width(pt0, pt1, im_width)
if pt0[1] < 0:
new_point = _get_pointy_inside_origin(pt0, pt1)
if pt0[1] > im_height:
new_point = _get_pointy_inside_height(pt0, pt1, im_height)
return new_point
def _get_pointx_inside_origin(pt0, pt1):
""" Given a point pt0, return an updated point that is
inside orgin. It finds line equation and uses it to
get updated point x value inside origin
Returns
-------
(float, float): updated point
"""
return (0, pt0[1])
# TODO
def _get_pointx_inside_width(pt0, pt1, im_width):
""" Given a point pt0, return an updated point that is
inside image width. It finds line equation and uses it to
get updated point x value inside image width
Returns
-------
(float, float): updated point
"""
return (im_width, pt0[1])
# TODO
def _get_pointy_inside_origin(pt0, pt1):
""" Given a point pt0, return an updated point that is
inside orgin. It finds line equation and uses it to
get updated point y value inside origin
Returns
-------
(float, float): updated point
"""
return (pt0[0], 0)
# TODO
def _get_pointy_inside_height(pt0, pt1, im_height):
""" Given a point pt0, return an updated point that is
inside image height. It finds line equation and uses it to
get updated point y value inside image height
Returns
-------
(float, float): updated point
"""
return (pt0[0], im_height)
# TODO
| [
"[email protected]"
] | |
358e0825a1854b062e87d35611e52cd3c239266d | 21540ab033e180a3d94b270b7faffac7fe4af68f | /wordshop2/Project_01_10_page62-63/Project_05.py | e45ba58fc5058ea1e533a49592edf98b0103a792 | [] | no_license | tuan102081/wordshop1.2.3.5 | eaa344bdb04f565d1354b9476b4d4ecafc5cc7f3 | 70e75b56f48a2e5b1622d956f33831f80e64d368 | refs/heads/master | 2023-07-14T23:26:31.089484 | 2021-08-30T18:53:24 | 2021-08-30T18:53:24 | 401,411,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | """
Author: Nguyen Duy Tuan
Date: 29/08/2021
Program: project_05_page_62.py
Problem:
An object’s momentum is its mass multiplied by its velocity. Write a program
that accepts an object’s mass (in kilograms) and velocity (in meters per second) as
inputs and then outputs its momentum.
Solution:
Display:
Enter of mass(kg): 51
Enter of velocity(m/s): 60
Object’s momentum = 3060.0 (kgm/s)
"""
# Read the object's mass and velocity, then report its momentum (p = m * v),
# rounded to two decimals.
mass_kg = float(input("Enter of mass(kg): "))
velocity_ms = float(input("Enter of velocity(m/s): "))
momentum = mass_kg * velocity_ms
print("\nObject’s momentum = " + str(round(momentum, 2)) + " (kgm/s)")
| [
"[email protected]"
] | |
ec662f925b59e24fde024e4243aba389f33e0432 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/sieve-14.py | 52bce3a36228b57f2739edf857ed492498c0ab0c | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,587 | py | # A resizable list of integers
class Vector(object):
    # Growable integer vector for a ChocoPy benchmark: `size` is the logical
    # length, len(items) the physical capacity.
    # NOTE(review): "[$ID]" is a masked token in this dump; the original
    # benchmark presumably declares the field as [int] — confirm upstream.
    items: [$ID] = None
    size: int = 0
    def __init__(self:"Vector"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    # (negative indices are ignored; the tail is shifted left by one)
    def remove_at(self:"Vector", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    # Capacity doubles on growth (amortized O(1) appends) until
    # doubling_limit is reached, then grows one slot at a time.
    doubling_limit:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    # Build a DoublingVector holding i, i+1, ..., j-1 (empty when i >= j).
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    # In-place "sieve" (as the header says, not really Eratosthenes): for
    # each surviving value k, delete every later element divisible by k.
    # Quadratic removals — intentionally naive benchmark code.
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50
# Data
v:Vector = None
i:int = 0
# Crunch
v = vrange(2, n)
sieve(v)
# Print: the surviving values are exactly the primes in [2, n)
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"[email protected]"
] | |
ba1688de52c9aed52049beab16fcbf7d463add7d | a04296ba7b09f3a7b7540a14e8ef6fcf683ed392 | /common/mergelist.py | dac26a7dac26dd95d77cfed036796fb50f267e7f | [
"MIT"
] | permissive | Hasi-liu/FXTest | 0a3acf9d27d9f784f378fc9f9c13deb9e678adbe | 150012f87021b6b8204fd342c62538c10d8dfa85 | refs/heads/master | 2023-05-11T00:27:57.224448 | 2020-05-10T02:58:11 | 2020-05-10T02:58:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | """
@author: lileilei
@file: mergelist.py
@time: 2018/1/29 13:55
"""
"""
list的合并
"""
def hebinglist(list1: list):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    Any iterable elements work (strings included), matching the original
    nested-loop implementation.
    """
    return [element for group in list1 for element in group]
def listmax(list2: list):
    """Return (max, min, mean) of list2 after coercing entries to float.

    Entries that cannot be converted (bad strings, None, ...) are counted
    as 0, preserving the original behaviour.

    Fixes over the original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to the conversion errors
    float() actually raises, and the manual index-based summation loop is
    replaced with the built-in sum().

    Raises ValueError on an empty list (as before, via max()).
    """
    values = []
    for item in list2:
        try:
            values.append(float(item))
        except (TypeError, ValueError):
            values.append(0)  # non-numeric entries count as 0
    biggest = max(values)
    smallest = min(values)
    mean = sum(values) / len(values)
    return biggest, smallest, mean
"[email protected]"
] | |
f0457b814ef72bf357cd55551afddde24bb8f179 | 9cbc458ae2fa1f2be6eeb6fb4f4dfc49db464f1b | /financial/productgroup/migrations/0001_initial.py | 14fd26e06c60016d2d9401b4c4f5ffac79deec65 | [] | no_license | reykennethdmolina/projectfinsys | 45f8bd3248ad4b11c78cee6beefab040e6d58343 | a8604b9450b890e26b8f59f6acd76d64c415ccce | refs/heads/master | 2021-01-11T17:36:01.648840 | 2017-01-23T11:21:04 | 2017-01-23T11:21:04 | 79,797,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2017-01-17 06:07
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema migration: creates the `productgroup` table with audit
    # columns (enter/modify user and timestamps) and a custom view permission.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Productgroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, unique=True)),
                ('description', models.CharField(max_length=250)),
                ('status', models.CharField(choices=[('A', 'Active'), ('I', 'Inactive'), ('C', 'Cancelled'), ('O', 'Posted'), ('P', 'Printed')], default='A', max_length=1)),
                ('enterdate', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): this default is a datetime literal frozen at
                # the time the migration was generated, not "now" — it will
                # never update on save.
                ('modifydate', models.DateTimeField(default=datetime.datetime(2017, 1, 17, 14, 7, 34, 668000))),
                ('isdeleted', models.IntegerField(default=0)),
                ('enterby', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='productgroup_enter', to=settings.AUTH_USER_MODEL)),
                ('modifyby', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='productgroup_modify', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-pk'],
                'db_table': 'productgroup',
                'permissions': (('view_productgroup', 'Can view productgroup'),),
            },
        ),
    ]
| [
"[email protected]"
] | |
eec679bc8e8a903c116e1c4a9cc0fcfed3bde0af | f38c30595023f4f272121576b9e62ed2adbed7de | /contact_list.py | 8c047db9d0fd3e5d8d18d73a7614d2fe2b25233d | [] | no_license | devArist/contact_app_project | f1f19ed2cb4a9261575e5f182e4dcb28ba44e082 | 81d1d639d2e7a362490397d334345ce24a154789 | refs/heads/main | 2023-07-12T22:49:19.816760 | 2021-08-30T15:56:44 | 2021-08-30T15:56:44 | 401,285,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from kivymd.uix.boxlayout import MDBoxLayout
from kivy.lang import Builder
Builder.load_file('contact_list.kv')
class ContactBoxLayout(MDBoxLayout):
pass | [
"[email protected]"
] | |
e2a354f7de78bb119094313ee9b25118e374ca6c | ba2d449486c58578581b8de7b2b6f21074be6274 | /02 Linked Lists/2-8-Loop-Detection.py | 6af5ea7f052ea96436e98812922ad1180e7fa7bb | [] | no_license | theoliao1998/Cracking-the-Coding-Interview | 4e0abef8659a0abf33e09ee78ce2f445f8b5d591 | 814b9163f68795238d17aad5b91327fbceadf49e | refs/heads/master | 2020-12-09T12:46:10.845579 | 2020-07-25T05:39:19 | 2020-07-25T05:39:19 | 233,306,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | # Loop Detection: Given a circular linked list, implement an algorithm that returns the node at the
# beginning of the loop.
# DEFINITION
# Circular linked list: A (corrupt) linked list in which a node's next pointer points to an earlier node, so
# as to make a loop in the linked list.
# EXAMPLE
# Input: A -> B -> C -> D -> E -> C [the same C as earlier]
# Output: C
class ListNode(object):
    """Minimal singly-linked-list node used by the loop-detection demo."""
    def __init__(self, x):
        self.val = x
        self.next = None
    def append(self, x):
        """Walk to the tail and attach a new node holding x."""
        tail = self
        while tail.next is not None:
            tail = tail.next
        tail.next = ListNode(x)
def loopDectection(n):
    """Return the node where the list's cycle begins, or None if acyclic.

    Phase 1 (Floyd): advance slow/fast pointers until they coincide and
    count the steps taken — the count is a multiple of the cycle length.
    Phase 2: run a pointer that many steps ahead of the head, then move
    both pointers in lockstep; they meet at the cycle's entry node.
    """
    def _steps_until_meeting(head):
        # 0 means "no cycle".
        slow, fast = head, head.next
        steps = 0
        while fast:
            steps += 1
            if slow is fast:
                return steps
            slow = slow.next
            if fast.next is None:
                return 0
            fast = fast.next.next
        return 0
    cycle_steps = _steps_until_meeting(n)
    if not cycle_steps:
        return None
    ahead = n
    for _ in range(cycle_steps):
        ahead = ahead.next
    entry = n
    while entry is not ahead:
        entry = entry.next
        ahead = ahead.next
    return entry
# A = ListNode(1)
# B = ListNode(2)
# C = ListNode(3)
# D = ListNode(4)
# E = ListNode(5)
# A.next = B
# B.next = C
# C.next = D
# D.next = E
# E.next = C
# print(loopDectection(A).val)
| [
"[email protected]"
] | |
b26c45b8a0ae7b082b96599a14f020f230ee3eca | 8b16bd61c79113ff575def261e12f0e2125e4d90 | /browser/migrations/0004_search_history.py | 44305eecb73f1f97b11f14b0373386db7ecccd80 | [] | no_license | alvarantson/veebikaabits2.0 | 88f99e2fff8d0ef76daec3d3d3f4d6e19ed6d274 | 412d7d2fdc35582ba7210ea6108087a0d5ac9d7e | refs/heads/master | 2020-12-02T10:59:05.173248 | 2020-01-03T10:00:36 | 2020-01-03T10:00:36 | 230,990,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # Generated by Django 2.1.3 on 2018-11-28 11:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the `search_history` table: one row per scrape run, with item
    # counts broken down per marketplace.
    dependencies = [
        ('browser', '0003_blacklist'),
    ]
    operations = [
        migrations.CreateModel(
            name='search_history',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): the numeric-looking counters and the
                # datetime below are stored as CharFields — confirm before
                # relying on DB-side ordering or arithmetic.
                ('search_item', models.CharField(max_length=999)),
                ('time_elapsed', models.CharField(max_length=999)),
                ('search_datetime', models.CharField(max_length=999)),
                ('items_total', models.CharField(max_length=999)),
                ('items_okidoki', models.CharField(max_length=999)),
                ('items_osta', models.CharField(max_length=999)),
                ('items_soov', models.CharField(max_length=999)),
                ('items_kuldnebors', models.CharField(max_length=999)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
9337e099bf9ff81091912bb90e98f59afe773fe5 | d7ca36f20465870e67e7d6832f8e1b8348af12fc | /test/test_linear.py | ca9b35b8ac8886a9b4d3721c6f3eb6f8eb94d575 | [] | no_license | hlcr/LanguageNetworkAnalysis | c109e670534367c782fb71697a92a3ca95aba098 | 65f6c8086f3e4282b15359cc99cf57a682e6b814 | refs/heads/master | 2020-04-24T07:40:04.100213 | 2020-04-17T09:02:05 | 2020-04-17T09:02:05 | 171,805,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from sklearn import linear_model
import numpy as np
import matplotlib.pyplot as plt
# Linear regression demo (scikit-learn)
clf = linear_model.LinearRegression()
# Training
# NOTE(review): this array literal is built and immediately discarded.
np.array([1, 2, 3])
clf.fit(np.array([2, 4, 6]).reshape(-1,1), np.array([0, 2, 4]).reshape(-1,1))
# Fitted parameters: slope (coef_) and intercept
a, b = clf.coef_, clf.intercept_
print(a)
print(b)
# # 画图
# # 1.真实的点
# plt.scatter(df['square_feet'], df['price'], color='blue')
#
# # 2.拟合的直线
# plt.plot(df['square_feet'], regr.predict(df['square_feet'].reshape(-1,1)), color='red', linewidth=4)
#
# plt.show() | [
"[email protected]"
] | |
8dc206ecc05711beff9e20fc9c645ee81ed652dd | 1abcd4686acf314a044a533d2a541e83da835af7 | /backjoon_level_python/1701.py | 4112638d3382c51244a90f87bed9c9d769c2e387 | [] | no_license | HoYoung1/backjoon-Level | 166061b2801514b697c9ec9013db883929bec77e | f8e49c8d2552f6d62be5fb904c3d6548065c7cb2 | refs/heads/master | 2022-05-01T05:17:11.305204 | 2022-04-30T06:01:45 | 2022-04-30T06:01:45 | 145,084,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | def get_failure_array(text):
    # KMP failure (prefix) function: failure[i] is the length of the longest
    # proper prefix of text[:i+1] that is also a suffix of it.
    failure = [0] * len(text)
    j = 0
    for i in range(1, len(text)):
        # Fall back through shorter borders until text[i] can extend one.
        while j > 0 and text[i] != text[j]:
            j = failure[j - 1]
        if text[i] == text[j]:
            failure[i] = j + 1
            j += 1
    return failure
def solve(input_text):
    """Length of the longest substring occurring at least twice in
    input_text (Baekjoon 1701): the maximum over all suffixes of the
    KMP failure function's maximum value."""
    best = 0
    for start in range(len(input_text) - 1):
        prefix_func = get_failure_array(input_text[start:])
        best = max(best, max(prefix_func))
    return best
if __name__ == '__main__':
    # Read the string from stdin and print the answer.
    input_text = input()
    print(solve(input_text))
| [
"[email protected]"
] | |
7f559d2862ef1e3f93bcde50464d07a9767ac80e | 3d88748960deb31c674525df2bd9d79ba1d2db1a | /pythonlib/bin/pyfftr | 4127562195c91f40b757688d14d9e521c09d2ba6 | [
"BSD-2-Clause"
] | permissive | johnkerl/scripts-math | 1a0eb6ce86fd09d593c82540638252af5036c535 | cb29e52fec10dd00b33c3a697dec0267a87ab8bb | refs/heads/main | 2022-01-31T17:46:05.002494 | 2022-01-17T20:40:31 | 2022-01-17T20:40:31 | 13,338,494 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,546 | #!/usr/bin/python -Wall
# ----------------------------------------------------------------
# John Kerl
# [email protected]
# 2006-03-20
# (Ported to Python 2006-03-06)
#
# This is a radix-2 fast Fourier transform. Example:
#
# Real-to-complex FFT driver: packs N real samples into an N/2-point
# complex FFT and unpacks the spectrum (cf. Numerical Recipes, realft).
# ----------------------------------------------------------------
from __future__ import division # 1/2 = 0.5, not 0.
import sys
import pyfft_m
import pyrcio_m
from math import *
# ----------------------------------------------------------------
def usage():
	# Print command-line help to stderr and exit with a non-zero status.
	print >> sys.stderr, "Usage:", sys.argv[0], "[options] [file name]"
	print >> sys.stderr, "If the file name is omitted, input is taken from standard input."
	print >> sys.stderr, "Format is in decimal real, one sample per line. E.g."
	print >> sys.stderr, " 1.0"
	print >> sys.stderr, " 2.0"
	print >> sys.stderr, " 3.0"
	print >> sys.stderr, " 4.0"
	print >> sys.stderr, "Options:"
	print >> sys.stderr, " -fi: input folding"
	print >> sys.stderr, " -nfi: no input folding"
	print >> sys.stderr, " -fo: output folding"
	print >> sys.stderr, " -nfo: no output folding"
	print >> sys.stderr, " -fwd: forward FFT (exp(-i 2 pi k/N) kernel)"
	print >> sys.stderr, " -rev: reverse FFT (exp( i 2 pi k/N) kernel)"
	print >> sys.stderr, " -s: scaling"
	print >> sys.stderr, " -ns: no scaling"
	print >> sys.stderr, " -dft: Use DFT. Allows N not to be a power of 2."
	sys.exit(1)
# ================================================================
# Start of program
# Parse flags, read N real samples, and compute their spectrum via one
# N/2-point complex FFT (Python 2 script).
fold_in  = 0
fold_out = 0
forward  = 1
scale    = 1
use_dft  = 0
file_name = "-"
argc = len(sys.argv)
argi = 1
while (argi < argc):
	arg = sys.argv[argi]
	if (arg[0] != '-'):
		break
	if (arg == "-fi"):
		fold_in = 1
	elif (arg == "-nfi"):
		fold_in = 0
	elif (arg == "-fo"):
		fold_out = 1
	elif (arg == "-nfo"):
		fold_out = 0
	elif (arg == "-fwd"):
		forward = 1
	elif (arg == "-rev"):
		forward = 0
	elif (arg == "-s"):
		scale = 1
	elif (arg == "-ns"):
		scale = 0
	elif (arg == "-dft"):
		use_dft = 1
	elif (arg == "-ndft"):
		use_dft = 0
	else:
		usage()
	argi += 1
if ((argc - argi) == 1):
	file_name = sys.argv[argi]
elif ((argc - argi) == 0):
	file_name = "-"
else:
	usage()
# real input f_j: j = 0 .. N-1
# split: fe_j, fo_j: j = 0 .. N/2-1
# h_j = fe_j + i fo_j: j = 0 .. N/2-1
# By linearity: H_k = Fe_k + i Fo_k: k = 0 .. N/2-1
# Fe_k = sum_{j=0}^{N/2-1} f_{2j} w_N^2 [note w_N^2 = w_{N/2}]
# Fo_k = sum_{j=0}^{N/2-1} f_{2j+1} w_N^2
# F_k = Fe_k + w_N^k Fo_k
# F_k = 1/2(H_k + H_{N/2-k}^*) - i/2(H_k - H_{N/2-k}^*) w_N^k
# Save only 1st half of F_k: k = 0 .. N/2-1
# Need H_{N/2}: but = H_0. (Why?)
# -- Inverse --
# Fe_k = 1/2(F_k + F_{N/2-k}^*)
# "peel" F_{N/2} "from" F_0
# Fo_k = 1/2 w_N^{-k}(F_{N/2} - F_{N/2-k}^*)
# H_k = Fe_k + i Fo_k
f = pyrcio_m.read_real_vector(file_name)
print "f:"
pyrcio_m.print_real_vector(f)
print
N = len(f)
N2 = int(N/2)
print "N =", N
print "N2 =", N2
# Pack even samples into real parts, odd samples into imaginary parts.
h = []
for j in range(0, N2):
	h.append(f[2*j] + 1j*f[2*j+1])
print "h:"
pyrcio_m.print_complex_vector(h)
print
if (use_dft):
	H = pyfft_m.dft(h, fold_in, fold_out, forward, scale)
else:
	H = pyfft_m.fft(h, fold_in, fold_out, forward, scale)
H.append(H[0]) # Append H[N/2]
print "H:"
pyrcio_m.print_complex_vector(H)
print
# Unpack the length-N/2 spectrum H into the first N/2+1 bins of the
# real-input spectrum F using the even/odd symmetry relations above.
w_N = complex(cos(2*pi/N), sin(2*pi/N))
F = []
for k in range(0, N2+1):
	Hnegkbar = H[N2-k].conjugate()
	Fek = 0.5*(H[k] + Hnegkbar)
	Fok = 0.5*(H[k] - Hnegkbar) * (-1j)
	F.append(Fek + Fok * w_N**k)
print "F:"
pyrcio_m.print_complex_vector(F)
print
| [
"[email protected]"
] | ||
0266fc2e290229ee4fb5b79ceec76bc0a22a0e42 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902TG/128-tideGauge.py | a22e221a52b78d90b03e7e76dfe5eb0acc6f5054 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,075 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 01 10:00:00 2020
MERRAv2 netCDF extraction script - template
To create an extraction script for each tide gauge
@author: Michael Tadesse
"""
import os
import pandas as pd
from d_merra_define_grid import Coordinate, findPixels, findindx
from c_merra_read_netcdf import readnetcdf
from f_merra_subset import subsetter
def extract_data(delta= 3):
    """
    This is the master function that calls subsequent functions
    to extract uwnd, vwnd, slp for the specified
    tide gauges
    delta: distance (in degrees) from the tide gauge
    """
    print('Delta = {}'.format(delta), '\n')
    #defining the folders for predictors
    dir_in = "/lustre/fs0/home/mtadesse/MERRAv2/data"
    surge_path = "/lustre/fs0/home/mtadesse/obs_surge"
    csv_path = "/lustre/fs0/home/mtadesse/merraLocalized"
    #cd to the obs_surge dir to get TG information
    os.chdir(surge_path)
    tg_list = os.listdir()
    #cd to the obs_surge dir to get TG information
    os.chdir(dir_in)
    years = os.listdir()
    #################################
    #looping through the year folders
    #################################
    #to mark the first csv
    firstCsv = True;
    for yr in years:
        os.chdir(dir_in)
        #print(yr, '\n')
        os.chdir(os.path.join(dir_in, yr))
        ####################################
        #looping through the daily .nc files
        ####################################
        for dd in os.listdir():
            os.chdir(os.path.join(dir_in, yr)) #back to the predictor folder
            print(dd, '\n')
            #########################################
            #get netcdf components - predictor file
            #########################################
            nc_file = readnetcdf(dd)
            lon, lat, time, predSLP, predU10, predV10 = \
                nc_file[0], nc_file[1], nc_file[2], nc_file[3], nc_file[4]\
                , nc_file[5]
            # NOTE(review): x/y hard-code a single tide gauge (index 128);
            # this file is a per-gauge copy of a template script.
            x = 128
            y = 129
            #looping through individual tide gauges
            for t in range(x, y):
                #the name of the tide gauge - for saving purposes
                # tg = tg_list[t].split('.mat.mat.csv')[0]
                tg = tg_list[t]
                #extract lon and lat data from surge csv file
                #print(tg, '\n')
                os.chdir(surge_path)
                if os.stat(tg).st_size == 0:
                    print('\n', "This tide gauge has no surge data!", '\n')
                    continue
                surge = pd.read_csv(tg, header = None)
                #surge_with_date = add_date(surge)
                #define tide gauge coordinate(lon, lat)
                tg_cord = Coordinate(surge.iloc[0,0], surge.iloc[0,1])
                #find closest grid points and their indices
                close_grids = findPixels(tg_cord, delta, lon, lat)
                ind_grids = findindx(close_grids, lon, lat)
                #loop through preds#
                #subset predictor on selected grid size
                predictors = {'slp':predSLP, 'wnd_u':predU10, \
                    'wnd_v':predV10}
                # Accumulate one growing frame per predictor across all days.
                for xx in predictors.keys():
                    pred_new = subsetter(dd, predictors[xx], ind_grids, time)
                    if xx == 'slp':
                        if firstCsv:
                            finalSLP = pred_new
                        else:
                            finalSLP = pd.concat([finalSLP, pred_new], axis = 0)
                            print(finalSLP.shape)
                    elif xx == 'wnd_u':
                        if firstCsv:
                            finalUwnd = pred_new
                        else:
                            finalUwnd = pd.concat([finalUwnd, pred_new], axis = 0)
                    elif xx == 'wnd_v':
                        if firstCsv:
                            finalVwnd = pred_new
                            # flip only after all three frames are seeded
                            firstCsv = False;
                        else:
                            finalVwnd = pd.concat([finalVwnd, pred_new], axis = 0)
            #create directories to save pred_new
            os.chdir(csv_path)
            #tide gauge directory
            tg_name_old = tg.split('.mat.mat.csv')[0]
            tg_name = '-'.join([str(t), tg_name_old])
            try:
                os.makedirs(tg_name)
                os.chdir(tg_name) #cd to it after creating it
            except FileExistsError:
                #directory already exists
                os.chdir(tg_name)
            #save as csv (rewritten after every day; final write wins)
            finalSLP.to_csv('slp.csv')
            finalUwnd.to_csv('wnd_u.csv')
            finalVwnd.to_csv('wnd_v.csv')
#run script
extract_data(delta= 3)
| [
"[email protected]"
] | |
9886045608f2213f99a41a0af0b8b79aa8486538 | 69a4db25d9f7d4e67cf2bcfe005e5cba9915180a | /examprep.py | 4eae0ad01d13431e655ff277605755e813e07ef2 | [] | no_license | riley-csp-2019-20/final-exam-semester-1-taylor77205 | ca3211a606819eab48d118bb6e5dc08dcf190b9c | ee37ca47c1090b8a23a6d3ed01448ed1494d9183 | refs/heads/master | 2020-11-25T12:22:28.536638 | 2019-12-19T16:39:33 | 2019-12-19T16:39:33 | 228,657,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | import turtle as trtl
shape = "arrow"
# Player sprite: arrow cursor, fastest animation, pen lifted so moves
# leave no trail.
player = trtl.Turtle(shape = shape)
player.speed(0)
player.penup()
shape = "circle"
# Companion marker that mirrors every player move.
circle = trtl.Turtle(shape = shape)
circle.penup()
def up():
    """Step both sprites 10 px north (player first, then its marker)."""
    for sprite in (player, circle):
        sprite.setheading(90)
        sprite.forward(10)
def down():
    """Step both sprites 10 px south (player first, then its marker)."""
    for sprite in (player, circle):
        sprite.setheading(270)
        sprite.forward(10)
def right():
    """Step both sprites 10 px east (player first, then its marker)."""
    for sprite in (player, circle):
        sprite.setheading(0)
        sprite.forward(10)
def left():
    """Step both sprites 10 px west (player first, then its marker)."""
    for sprite in (player, circle):
        sprite.setheading(180)
        sprite.forward(10)
# Wire the arrow keys to the movement handlers and start listening.
wn=trtl.Screen()
wn.onkeypress(up,"Up")
wn.onkeypress(down,"Down")
wn.onkeypress(right,"Right")
wn.onkeypress(left,"Left")
wn.listen()
wn.mainloop() | [
"[email protected]"
] | |
30723c2e851a6064831ceee31779a2e0923f132d | 8de2a78facbdedb033e349692c71e33ce6f47315 | /string_format.py | bda55de3713cf8d1cf8a87976aba26d564aa51b8 | [] | no_license | KshitjMaheshwari/python38-GLA | 230e4ce96c4416bbc7b11477772a827ee0d62a46 | 4d29d377ab38f75510f995293f62b7c59229423b | refs/heads/master | 2022-04-17T05:17:15.205216 | 2020-02-18T19:21:18 | 2020-02-18T19:21:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | '''
str.format() is one of the string formatting methods in Python3,
which allows multiple substitutions and value formatting.
This method lets us concatenate elements within a string through positional formatting.
'''
a = 10
b = 30
c = a + b
# %-style (printf-like) formatting: template first, values substituted later.
temp = 'result is %d of %d and %d'
f = temp % (c, a, b)
print(f)
# tag f format
a = 10
b = 30
c = a + b
# f-strings (Python 3.6+): expressions are interpolated in place.
temp = f"result is {c} of {a} and {b} 😙"
print(temp) # result is 40 of 10 and 30
f = f'result is {c} of {a} {b}'
print(f) # result is 40 of 10 30
# str.format(): empty placeholders are filled positionally.
dh = 'result is {} of {} {}'
f = dh.format(c, a, b)
print(f) # result is 40 of 10 30
f = 'result is %d of %d %d' % (c, a, b)
print(f)
# Width spec: {:10d} right-aligns the integer in a 10-character field.
k = 'my name is and my record is {:10d}'.format(22223)
print(k)
| [
"[email protected]"
] | |
3d635f23f15d180a8acda2ef07e91f7f9fb3984e | 9818262abff066b528a4c24333f40bdbe0ae9e21 | /Day 60/TheBomberMan.py | 46f6d9fdaed89da0f250aff715ff45b108c9a598 | [
"MIT"
] | permissive | skdonepudi/100DaysOfCode | 749f62eef5826cb2ec2a9ab890fa23e784072703 | af4594fb6933e4281d298fa921311ccc07295a7c | refs/heads/master | 2023-02-01T08:51:33.074538 | 2020-12-20T14:02:36 | 2020-12-20T14:02:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,211 | py | ''''
Bomberman lives in a rectangular grid. Each cell in the grid either contains a bomb or nothing at all.
Each bomb can be planted in any cell of the grid but once planted, it will detonate after exactly 3 seconds. Once a bomb detonates, it's destroyed — along with anything in its four neighboring cells. This means that if a bomb detonates in cell , any valid cells and are cleared. If there is a bomb in a neighboring cell, the neighboring bomb is destroyed without detonating, so there's no chain reaction.
Bomberman is immune to bombs, so he can move freely throughout the grid. Here's what he does:
Initially, Bomberman arbitrarily plants bombs in some of the cells, the initial state.
After one second, Bomberman does nothing.
After one more second, Bomberman plants bombs in all cells without bombs, thus filling the whole grid with bombs. No bombs detonate at this point.
After one more second, any bombs planted exactly three seconds ago will detonate. Here, Bomberman stands back and observes.
Bomberman then repeats steps 3 and 4 indefinitely.
Note that during every second Bomberman plants bombs, the bombs are planted simultaneously (i.e., at the exact same moment), and any bombs planted at the same time will detonate at the same time.
Given the initial configuration of the grid with the locations of Bomberman's first batch of planted bombs, determine the state of the grid after seconds.
For example, if the initial grid looks like:
...
.O.
...
it looks the same after the first second. After the second second, Bomberman has placed all his charges:
OOO
OOO
OOO
At the third second, the bomb in the middle blows up, emptying all surrounding cells:
...
...
...
Function Description
Complete the bomberMan function in the editory below. It should return an array of strings that represent the grid in its final state.
bomberMan has the following parameter(s):
n: an integer, the number of seconds to simulate
grid: an array of strings that represents the grid
Input Format
The first line contains three space-separated integers , , and , The number of rows, columns and seconds to simulate.
Each of the next lines contains a row of the matrix as a single string of characters. The . character denotes an empty cell, and the O character (ascii 79) denotes a bomb.
Constraints
Subtask
for of the maximum score.
Output Format
Print the grid's final state. This means lines where each line contains characters, and each character is either a . or an O (ascii 79). This grid must represent the state of the grid after seconds.
Sample Input
6 7 3
.......
...O...
....O..
.......
OO.....
OO.....
Sample Output
OOO.OOO
OO...OO
OOO...O
..OO.OO
...OOOO
...OOOO
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the bomberMan function below.
def bomberMan(n, grid):
    """Return the grid after n seconds of the Bomberman plant/detonate cycle.

    grid is a list of equal-length strings of '.' (empty) and 'O' (bomb).
    The returned value is a list of strings in the same format.

    Fix over the original: the dimensions were read from module globals
    `r` and `c` that only exist when the script is run as __main__, so the
    function raised NameError when imported or called directly.  They are
    now derived from `grid` itself.
    """
    rows, cols = len(grid), len(grid[0])
    if n in (0, 1):
        # Nothing detonates during the first second: initial state persists.
        return grid
    if n % 2 == 0:
        # On every even second the grid has just been completely planted.
        return ['O' * len(row) for row in grid]
    # For odd n > 1 the state is periodic with period 4 starting at t = 3,
    # so simulating up to second 4 + n % 4 (i.e. 5 or 7) gives the answer.
    result = [list(row) for row in grid]
    coords = [(x, y) for x in range(rows) for y in range(cols)
              if grid[x][y] == 'O']
    passed = 1
    while passed < 4 + n % 4:
        passed += 1
        if passed % 2 == 0:
            # Even second: every empty cell gets a bomb.
            result = [['O'] * cols for _ in range(rows)]
        else:
            # Odd second: bombs planted three seconds ago (coords) detonate,
            # clearing themselves and their four orthogonal neighbours.
            for row, col in coords:
                result[row][col] = '.'
                if row > 0:
                    result[row - 1][col] = '.'
                if row < rows - 1:
                    result[row + 1][col] = '.'
                if col > 0:
                    result[row][col - 1] = '.'
                if col < cols - 1:
                    result[row][col + 1] = '.'
            # The bombs surviving the blast detonate at the next odd second.
            coords = [(x, y) for x in range(rows) for y in range(cols)
                      if result[x][y] == 'O']
    return [''.join(row) for row in result]
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First input line: row count, column count and seconds to simulate.
    r, c, n = (int(token) for token in input().split())
    # The next r lines are the rows of the starting grid.
    grid = [input() for _ in range(r)]
    result = bomberMan(n, grid)
    fptr.write('\n'.join(result))
    fptr.write('\n')
    fptr.close()
| [
"[email protected]"
] | |
6773f61b800ed243653848153717040551b46c5c | 56789f51d1feb757171b151b56c59143e74c6fe1 | /projects/examples/dragon_button_relay_push_only/pinButton.py | f0fc6e7fb8a5a84c6121ba1877377927a1833a31 | [] | no_license | aid402/micropython_project | 235926120e8a78033572386b9407a5eb6e7f473e | 9111398492f0cf511da8e6f83b34d8e4e4f90278 | refs/heads/master | 2020-07-21T10:29:16.935739 | 2018-11-02T22:07:14 | 2018-11-02T22:07:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,843 | py |
from machine import Pin
import time
import relay
class PinButton:
    '''
    Cooperative, debounced push-button reader for a GPIO pin (MicroPython).

    makebutton() is a generator that samples the pin one step per resume;
    the caller advances it through the `push` property so the main loop
    stays responsive.  The button also drives a relay: pressed (pin reads
    0) switches the relay on, released switches it off.
    '''
    # init
    def __init__(self, pinNum, Pull, debug=False, relay_control=None):
        # Input pin wired to the button (active low: value 0 == pressed).
        self._pin = Pin(pinNum, Pin.IN, Pull )
        self.debug = debug
        # State code last written by the polling generator
        # (1 sampling, 10 debouncing, 11 pressed, 12 released).
        self.status = 0
        # Debounced value exposed to callers; raw pin sample in _value.
        self.value = None
        self._value = None
        # Relay driven in lockstep with the button state.
        self.relay = relay.RELAY(relay_control)
        self.button = None # Generator instance
        # self.button = self.makebutton() # Generator instance
        #
        # next(self.button)
        # for _ in range(128):
        #     next(self.button)
        #     time.sleep_ms(1)
    def makebutton(self):
        # Infinite generator: one pin sample per next() call.
        delays = -25 # mS delay
        while True:
            self._value = self._pin.value()
            t_start = time.ticks_ms()
            self.status = 1
            if self._value == 0:
                # Pin reads pressed: intended debounce window, yielding
                # control back to the caller between checks.
                # NOTE(review): ticks_diff(t_start, now) starts near 0 and
                # decreases over time, so `<= -25` is False on the first
                # test and this inner loop is skipped immediately -- the
                # comparison looks inverted (probably meant to loop while
                # fewer than 25 ms have elapsed).  Confirm on hardware.
                while time.ticks_diff(t_start, time.ticks_ms()) <= delays:
                    self.status = 10
                    yield None
                self.relay.set_state(1)
                self.value = self._value
                self.status = 11
            else:
                # Button released: report 1 and drop the relay.
                self.value = 1
                self.relay.set_state(0)
                self.status = 12
            yield None
    def start(self):
        # Create the polling generator and prime it to its first yield.
        self.button = self.makebutton() # Generator instance
        next(self.button)
    def stop(self):
        # Discard the generator; `push` is invalid until start() again.
        self.button = None # Generator instance
    @property
    def push(self):
        '''
        Advance the poller one step and return the debounced value:
        0 while pressed, 1 while released, -1 if status is still 0
        (no sample recorded yet), or -255 if the generator has ended.

        NOTE(review): if start() was never called, self.button is None and
        next(None) raises TypeError, which is not caught here.
        '''
        try:
            next(self.button)
        except StopIteration:
            if self.debug:
                print("StopIteration")
            return -255
        value = self.value
        if self.status == 0:
            value = -1
        return value
| [
"[email protected]"
] | |
a293a4c0f1bef50f86231c141441a29c0ea77f66 | b51fcaacf7a43cfc4e378b27090c652ed5bd8ee2 | /pyfx/tests/test_spreadhandler.py | 4dad32859c0dafba258a980ee780e00e99c632b1 | [] | no_license | tetocode/fxarb | 56526308eb91616eb60b13152ad03dab73de7ca4 | 00261dc6832047375499363af2db44efa2d36008 | refs/heads/master | 2022-10-18T16:45:51.971435 | 2020-06-03T16:19:39 | 2020-06-03T16:19:39 | 269,136,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | import copy
from collections import deque
from datetime import datetime, timedelta
import gevent
import pytest
import pytz
from pyfx.pricehandler import PriceHandler, Price
from pyfx.spreadhandler import SpreadHandler, Spread
def test_spread_handler():
    """SpreadHandler derives per-source-pair spread histories from prices."""
    handler = SpreadHandler(PriceHandler())
    assert handler.prices == {}
    t0 = datetime.utcnow().replace(tzinfo=pytz.utc)
    t1 = t0 + timedelta(minutes=1, seconds=1)
    # First batch: a single USD/JPY quote from source 'xxx'.
    batch = [Price('xxx', 'USD/JPY', t0, 0.01, 0.02)]
    handler.handle(prices=copy.deepcopy(batch))
    assert handler.prices == {'xxx': {'USD/JPY': deque(batch)}}
    want = {
        ('xxx', 'xxx'): {
            'USD/JPY': deque([
                Spread(('xxx', 'xxx'), 'USD/JPY', t0, 0.01, 0.02)
            ])
        }
    }
    assert handler.spreads == want
    # Second batch: a fresher USD/JPY quote plus EUR/JPY from two sources.
    batch = [
        Price('xxx', 'USD/JPY', t1, 0.01, 0.03),
        Price('xxx', 'EUR/JPY', t0, 0.03, 0.05),
        Price('yyy', 'EUR/JPY', t1, 0.06, 0.08),
    ]
    handler.handle(prices=copy.deepcopy(batch))
    # Every ordered source pair gets its own spread series: bid comes from
    # the first source of the pair and ask from the second.
    want = {
        ('xxx', 'xxx'): {
            'USD/JPY': deque([
                Spread(('xxx', 'xxx'), 'USD/JPY', t0, 0.01, 0.02),
                Spread(('xxx', 'xxx'), 'USD/JPY', t1, 0.01, 0.03)
            ]),
            'EUR/JPY': deque([
                Spread(('xxx', 'xxx'), 'EUR/JPY', t0, 0.03, 0.05),
            ])
        },
        ('xxx', 'yyy'): {
            'EUR/JPY': deque([
                Spread(('xxx', 'yyy'), 'EUR/JPY', t1, 0.03, 0.08)
            ])
        },
        ('yyy', 'xxx'): {
            'EUR/JPY': deque([
                Spread(('yyy', 'xxx'), 'EUR/JPY', t1, 0.06, 0.05)
            ])
        },
        ('yyy', 'yyy'): {
            'EUR/JPY': deque([
                Spread(('yyy', 'yyy'), 'EUR/JPY', t1, 0.06, 0.08)
            ])
        }
    }
    assert handler.spreads == want
| [
"_"
] | _ |
c950deb33595ab7513145a259c0dad0684cff22f | e5255d7588b117f000c8e11a57127d7bbb63a6e6 | /collection/j1/01_getImageJson.py | 6d1e626ddc17536930faed75ea9b0610302058d2 | [] | no_license | nakamura196/toyo_images | 4134e9ae7d5790e04c157195ecdea10f952dbbf2 | 60c71b23b6028c639c4f9b1ee3083c083421a336 | refs/heads/master | 2020-08-25T13:46:25.334222 | 2019-10-25T03:15:06 | 2019-10-25T03:15:06 | 216,973,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | import urllib.request
from bs4 import BeautifulSoup
from time import sleep
import json
import hashlib
import os
from PIL import Image
import requests
import shutil
import urllib.parse
def download_img(url, file_name):
    """Stream the image at `url` into `file_name`.

    Does nothing when the server answers with a non-200 status.

    Fix over the original: the streaming Response was never closed, so the
    underlying connection leaked; using the Response as a context manager
    releases it in every case.
    """
    print("img="+url)
    with requests.get(url, stream=True) as r:
        if r.status_code == 200:
            with open(file_name, 'wb') as f:
                # Let urllib3 decode gzip/deflate so raw bytes are the image.
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
def dwn(url):
    """Fetch `url`, find its first <img>, and mirror that image locally.

    The remote path under http://124.33.215.236/ is mapped to ../../ and the
    file is only downloaded when it does not already exist.

    Fix over the original: a page without an <img> tag (or with an <img>
    lacking a src attribute) raised AttributeError; such pages are now
    skipped quietly.
    """
    html = requests.get(url).text
    soup = BeautifulSoup(html, "html.parser")
    img = soup.find("img")
    if img is None or not img.get("src"):
        # Nothing to download on this page.
        return
    src = urllib.parse.urljoin(url, img.get("src"))
    # Mirror the remote path into the local ../../ tree.
    opath = src.replace("http://124.33.215.236/", "../../")
    if not os.path.exists(opath):
        parent_dir = os.path.split(opath)[0]
        os.makedirs(parent_dir, exist_ok=True)
        download_img(src, opath)
# Crawl the J1 image index: collect every linked page, then download the
# first image of each page dated 201511 (and of any nested 201511.php page).
#
# Fix over the original: an unused `id = url0.split("lstdir=")[1]...` line
# shadowed the builtin `id` and raised IndexError for URLs without a
# "lstdir=" parameter; it has been removed.
url = "http://124.33.215.236/gazou/index_img.php?tg=J1"
html = urllib.request.urlopen(url)
soup = BeautifulSoup(html, "html.parser")
aas = soup.find_all("a")
urls = []
for a in aas:
    # Resolve relative hrefs against the index URL.
    href = urllib.parse.urljoin(url, a.get("href"))
    urls.append(href)
for url0 in sorted(urls):
    if "201511" in url0:
        print("url0="+url0)
        try:
            html = requests.get(url0).text
        except Exception as e:
            # Best effort: log network errors and keep crawling.
            print(e)
            continue
        soup = BeautifulSoup(html, "html.parser")
        dwn(url0)
        aas = soup.find_all("a")
        for a in aas:
            href = urllib.parse.urljoin(url0, a.get("href"))
            if "201511.php" in href:
                dwn(href)
"[email protected]"
] | |
c5025700fd6858b320117ab2a06db5014ae2496a | 0e94b21a64e01b992cdc0fff274af8d77b2ae430 | /python/022_Objective.py | 8b3d80fef29ab63035d097dd75d51e71daa5b828 | [] | no_license | yangnaGitHub/LearningProcess | 1aed2da306fd98f027dcca61309082f42b860975 | 250a8b791f7deda1e716f361a2f847f4d12846d3 | refs/heads/master | 2020-04-15T16:49:38.053846 | 2019-09-05T05:52:04 | 2019-09-05T05:52:04 | 164,852,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | #可以通过类名访问其属性
#类对象支持两种操作==>属性引用(obj.name)和实例化
#将对象创建为有初始状态的,定义一个名为__init__()的特殊方法(自动调用,构造方法)
#使用def关键字可以为类定义一个方法,类方法必须包含参数self,且为第一个参数
#支持类的继承,圆括号中基类的顺序,若是基类中有相同的方法名,从左到右查找基类中是否包含方法
#两个下划线开头声明该属和方法为私有,不能在类地外部被使用或直接访问
class people:
    """A person with a name, an age, and a private weight."""

    # Class-level defaults; __init__ shadows them with instance attributes.
    name = ""
    age = 0
    __weight = 0  # name-mangled to _people__weight: private by convention

    def __init__(self, name, age, weight):
        self.name, self.age = name, age
        self.__weight = weight

    def speak(self):
        summary = "%s ==> %d" % (self.name, self.age)
        print(summary)
class student(people):
    """A person who is also enrolled: adds a grade level."""

    grade = 0  # class-level default, shadowed per instance in __init__

    def __init__(self, name, age, weight, grade):
        # Initialise the inherited fields first, then our own.
        people.__init__(self, name, age, weight)
        self.grade = grade

    def speak(self):
        summary = "%s ==> %d ==> %d" % (self.name, self.age, self.grade)
        print(summary)
# Demo: the subclass instance uses the overriding speak(), not people.speak().
stu = student("natasha", 22, 58, 2)
stu.speak()
#重写,子类重写父类的方法
class Parent:
    """Base class whose method() announces which class implements it."""

    def method(self):
        label = "Parent"
        print(label)
class Child(Parent):
    """Subclass that overrides method() to demonstrate dynamic dispatch."""

    def method(self):
        label = "Child"
        print(label)
# Demo: the override in Child is the method that actually runs.
child = Child()
child.method()
#类的专有方法
#__init__构造函数,在生成对象时调用
#__del__析构函数
#__repr__打印
#__setitem__按照索引赋值
#__getitem__按照索引获取值
#__len__获得长度
#__cmp__比较运算
#__call__函数调用
#__add__加运算
#__sub__减运算
#__mul__乘运算
#__div__除运算
#__mod__求余运算
#__pow__乘方
#支持运算符重载
class Vector:
    """A 2-component vector supporting + via operator overloading."""

    def __init__(self, val1, val2):
        self.val1 = val1
        self.val2 = val2

    def __str__(self):
        # %d formatting: components are rendered as integers.
        return "Vector(%d, %d)" % (self.val1, self.val2)

    def __add__(self, other):
        # Fix: adding a non-Vector used to raise AttributeError; returning
        # NotImplemented lets Python produce a proper TypeError (or try the
        # other operand's __radd__).
        if not isinstance(other, Vector):
            return NotImplemented
        return Vector(self.val1 + other.val1, self.val2 + other.val2)
# Demo: + dispatches to Vector.__add__ and print() uses Vector.__str__.
v1 = Vector(2, 10)
v2 = Vector(5, -2)
print(v1 + v2)
| [
"[email protected]"
] | |
e5c95f65e2d375ab804087caa24c1424a0aba734 | 291f0aa9a40eeca26fb08106c952b9347db7dba7 | /nz_crawl_demo/day2/requests/biquge.py | 4436df5628c3550c69cfc0f0492fb0cc28404bae | [
"Apache-2.0"
] | permissive | gaohj/nzflask_bbs | fad10b93f8f495a94d5d6db6f5c60d85c1c85518 | 36a94c380b78241ed5d1e07edab9618c3e8d477b | refs/heads/master | 2022-12-12T21:43:17.417294 | 2020-03-20T10:28:22 | 2020-03-20T10:28:22 | 239,702,874 | 0 | 2 | Apache-2.0 | 2022-12-08T03:50:07 | 2020-02-11T07:34:01 | JavaScript | UTF-8 | Python | false | false | 569 | py | import requests
# Log-in endpoint of xbiquge.la; jumpurl is where the site redirects after.
url = "http://www.xbiquge.la/login.php?jumpurl=http://www.xbiquge.la/"
# NOTE(review): credentials are hard-coded; move them to configuration or
# environment variables before sharing this script.
data = {
    "LoginForm[username]":"kangbazi666",
    "LoginForm[password]":'kangbazi666',
}
# A browser-like User-Agent so the site serves the normal pages.
headers = {
    'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0"
}
# Log in
session = requests.Session() # Session keeps the login cookie for later requests
session.post(url,data=data,headers=headers)
# The bookcase page requires the authenticated session created above.
res = session.get("http://www.xbiquge.la/modules/article/bookcase.php")
# Save the fetched page locally for inspection.
with open('biquge.html','w',encoding='utf-8') as fp:
    fp.write(res.content.decode('utf-8'))
| [
"[email protected]"
] | |
4edc4a4117a2f5785f06ed7c041ecc6251e057d3 | 13f900b9dc0c3e838ff788febaa59514b97d1128 | /Proyecto/apps.py | 40c7b3b40f6d31687e5ba04a1ee90b01b19feb2f | [] | no_license | JorgitoR/App-Proyectos-Slabcode | 68439c5fe0dbe58a004b9f04be807f6756d84a7f | 173ea655bf00f8b5ae7fb0eb4ee0cf0ed5e6f3a7 | refs/heads/main | 2023-04-12T21:52:16.339073 | 2021-04-10T21:02:57 | 2021-04-10T21:02:57 | 356,660,392 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | from django.apps import AppConfig
class ProyectoConfig(AppConfig):
    """Django application configuration for the ``Proyecto`` app."""

    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'Proyecto'
| [
"[email protected]"
] | |
e9bb27222c38f40ffe7f88c5cf3722d5dd47c363 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/10/usersdata/124/24836/submittedfiles/testes.py | 5775a4b04ac6e07e20b13628e10307df3311b756 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
def vabsol(x):
    """Return the absolute value of x.

    Replaces the hand-rolled sign flip with the built-in abs(), which
    handles ints and floats alike.
    """
    return abs(x)
def calculopi(y):
    """Approximate pi with y terms of the Nilakantha series.

    pi = 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...
    y == 0 simply returns 3.
    """
    approx = 3
    base = 2
    for term_index in range(y):
        term = 4 / (base * (base + 1) * (base + 2))
        if term_index % 2 == 0:
            approx = approx + term
        else:
            approx = approx - term
        base = base + 2
    return approx
def cos(z, epsilon):
    """Approximate cos(z) with its Taylor series around 0.

    cos z = 1 - z**2/2! + z**4/4! - ...  Terms keep being added while the
    magnitude of the most recent term is at least epsilon.

    Fix over the original: the term `d` was computed once before the loop
    and never updated inside it, so the function either returned 1
    immediately or looped forever whenever epsilon <= z**2.  The term is
    now recomputed on every iteration.
    """
    cosz = 1
    v = 2
    fat = 1
    cont = 0
    # Loop-entry estimate of the first term (factorial not yet applied).
    d = (z**v)/fat
    while epsilon <= d:
        # fat = v! computed from scratch each pass (fat is reset below).
        for i in range(v, 0, -1):
            fat = fat*i
        # Current term magnitude z**v / v!.
        d = (z**v)/fat
        if cont % 2 != 0:
            cosz = cosz + d
        else:
            cosz = cosz - d
        v = v + 2
        fat = 1
        cont = cont + 1
    return cosz
def razaurea(m, epsilon):
    """Approximate the golden ratio as 2*cos(pi/5).

    pi comes from calculopi(m); epsilon is the tolerance passed to cos().
    """
    pi_aprox = calculopi(m)
    return 2 * cos(pi_aprox / 5, epsilon)
# Interactive driver.  Written for Python 2 (note the __future__ division
# import at the top of the file): input() evaluates the typed expression,
# so epsilon arrives as a number, not a string.
m = int(input('Digite o número m de termos da fórmula de pi: '))
epsilon = input('Digite o epsilon para o cálculo da razão áurea: ')
# Guard against a negative term count.
m = vabsol(m)
print('Valor aproximado de pi: %.15f' %calculopi(m))
print('Valor aproximado da razão áurea: %.15f' %razaurea(m, epsilon))
"[email protected]"
] | |
aa293e1ff78c775da8ee0d65c93d61dbe77e9ece | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_212/ch38_2020_06_18_18_19_09_636520.py | a1acd720111d63b2d1b433ca15896300cc635a3a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | def quantos_uns (n):
def quantos_uns(n):
    """Return how many '1' digits appear in the decimal representation of n.

    Fix over the original: the loop ran while i <= len(num), so num[i]
    raised IndexError on the final iteration.  Iterating over the string
    directly removes the index bookkeeping entirely.
    """
    soma = 0
    for digito in str(n):
        if digito == '1':
            soma += 1
    return soma
| [
"[email protected]"
] | |
9165fa645530445bd62b1dd6a0a62069ada7bff7 | 06e34e2dface0b87fa785cab7e65422a5f20ba18 | /Solutions/900-RLE-Iterator/python.py | df44e067f90f609efe109d47495f2673b48fe69d | [] | no_license | JerryHu1994/LeetCode-Practice | c9841b0ce70451c19c8a429a3898c05b6233e1d4 | b0ce69985c51a9a794397cd98a996fca0e91d7d1 | refs/heads/master | 2022-02-10T04:42:28.033364 | 2022-01-02T04:44:22 | 2022-01-02T04:44:22 | 117,118,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | class RLEIterator(object):
    def __init__(self, A):
        """
        :type A: List[int]

        A is a run-length encoding: A[2*i] copies of the value A[2*i + 1].
        The list is stored by reference and consumed in place by next().
        """
        self.li = A
def next(self, n):
"""
:type n: int
:rtype: int
"""
if len(self.li) == 0: return -1
cnt = n
while cnt > 0:
if len(self.li) == 0: return -1
if cnt <= self.li[0]:
ret = self.li[1]
self.li[0] -= cnt
return ret
else:
cnt -= self.li[0]
self.li.pop(0)
self.li.pop(0)
return -1
# Your RLEIterator object will be instantiated and called as such:
# obj = RLEIterator(A)
# param_1 = obj.next(n) | [
"[email protected]"
] | |
818d347d5ad5029e8246fe46f97504bcf6646510 | 8a42be3f930d8a215394a96ad2e91c95c3b7ff86 | /Build/Instalation/GeneralDb/Marathon/MarathonTests_3.5.2/HSQL_RecordEditor1/TestCases/SaveAs/SaveAsXml1.py | 7e17d8d59bd69361fc57951c63a851daf3fe52ae | [] | no_license | java-tools/jrec | 742e741418c987baa4350390d126d74c0d7c4689 | 9ece143cdd52832804eca6f3fb4a1490e2a6f891 | refs/heads/master | 2021-09-27T19:24:11.979955 | 2017-11-18T06:35:31 | 2017-11-18T06:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | #{{{ Marathon
from default import *
#}}} Marathon
from Modules import commonBits
def test():
    # Marathon GUI regression test: open a sample binary in the RecordEditor,
    # export it as Xml, then verify the generated tree view.  The select /
    # click / assert_content calls come from Marathon's `default` module
    # (star-imported above); the call order mirrors the recorded GUI session.
    set_java_recorded_version("1.6.0_22")
    # Open the sample file and enter the editor.
    if frame(' - Open File:0'):
        select('File', commonBits.sampleDir() + 'DTAR020_tst1.bin')
        click('Edit')
        close()
    if window('Record Editor'):
        click('Export')
    # Export as Xml and open the generated output for inspection.
    if frame('Export - DTAR020_tst1.bin:0'):
##        select('JTabbedPane_16', 'Xml')
        select('File Name_2', 'Xml')
        select('Edit Output File', 'true')
        click('save file')
        close()
    # Check the exported Xml tree against the expected cell contents.
    if frame('Tree View - DTAR020_tst1.bin.xml:0'):
        select('net.sf.RecordEditor.utils.swing.treeTable.JTreeTable_10', 'rows:[9],columns:[Xml~Namespace]')
        assert_content('net.sf.RecordEditor.utils.swing.treeTable.JTreeTable_10', [ ['', '', 'UTF-8', '1.0', 'false', '', '', '', '', '', '', ''],
            ['', '', '', '', '', '', '', '', '', '', '', ''],
            ['', '', '', '', '63604808', '20', '40118', '170', '1', '4.87', 'True', ''],
            ['', '', '', '', '69684558', '20', '40118', '280', '1', '19.00', 'True', ''],
            ['', '', '', '', '69684558', '20', '40118', '280', '-1', '-19.00', 'True', ''],
            ['', '', '', '', '69694158', '20', '40118', '280', '1', '5.01', 'True', ''],
            ['', '', '', '', '62684671', '20', '40118', '685', '1', '69.99', 'True', ''],
            ['', '', '', '', '62684671', '20', '40118', '685', '-1', '-69.99', 'True', ''],
            ['', '', '', '', '61664713', '59', '40118', '335', '1', '17.99', 'True', ''],
            ['', '', '', '', '61664713', '59', '40118', '335', '-1', '-17.99', 'True', ''],
            ['', '', '', '', '61684613', '59', '40118', '335', '1', '12.99', 'True', ''],
            ['', '', '', '', '68634752', '59', '40118', '410', '1', '8.99', 'True', ''],
            ['', '', '', '', '60694698', '59', '40118', '620', '1', '3.99', 'True', ''],
            ['', '', '', '', '60664659', '59', '40118', '620', '1', '3.99', 'True', ''],
            ['', '', '', '', '60614487', '59', '40118', '878', '1', '5.95', 'True', ''],
            ['', '', '', '', '68654655', '166', '40118', '60', '1', '5.08', 'True', ''],
            ['', '', '', '', '69624033', '166', '40118', '80', '1', '18.19', 'True', ''],
            ['', '', '', '', '60604100', '166', '40118', '80', '1', '13.30', 'True', ''],
            ['', '', '', '', '68674560', '166', '40118', '170', '1', '5.99', 'True', '']
            ])
        select('net.sf.RecordEditor.utils.swing.treeTable.JTreeTable_10', 'rows:[9],columns:[Xml~Namespace]')
        click('Close')
##        select('net.sf.RecordEditor.utils.swing.treeTable.JTreeTable_10', 'rows:[9],columns:[Xml~Namespace]')
        close()
    select_menu('Window>>DTAR020_tst1.bin>>Table: ')
##    window_closed('Record Editor')
    close()
    pass
| [
"bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec"
] | bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec |
6fd2f9cac3bf22b97948b2a190ce4a65f9c488ae | 4554f8d3ab1a6267b17dad2b4d2c47b0abe8d746 | /benchmarking/lab_driver.py | 03e7770f8347f387876b15dba21e7f83f446d948 | [
"Apache-2.0"
] | permissive | jteller/FAI-PEP | 44fead3ca26f4844067d455c86ac8c5bfaf79a14 | 73b8a08815675135e9da7d68375d1218cbd04eaa | refs/heads/master | 2020-04-29T06:04:19.197966 | 2019-03-15T23:32:54 | 2019-03-15T23:32:54 | 175,904,011 | 0 | 0 | Apache-2.0 | 2019-03-15T23:30:04 | 2019-03-15T23:30:04 | null | UTF-8 | Python | false | false | 5,441 | py | #!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import os
from download_benchmarks.download_benchmarks import DownloadBenchmarks
from run_remote import RunRemote
from run_lab import RunLab
from harness import BenchmarkDriver
from repo_driver import RepoDriver as OSS_RepoDriver
from utils.custom_logger import getLogger, setLoggerLevel
# Command-line interface shared by every launch mode (local, lab, remote).
# Only these flags are consumed here; anything unrecognised is kept by
# parse_known_args and forwarded to the selected app.
parser = argparse.ArgumentParser(description="Download models from dewey")
parser.add_argument("--app_id",
    help="The app id you use to upload/download your file for everstore")
parser.add_argument("-b", "--benchmark_file",
    help="Specify the json file for the benchmark or a number of benchmarks")
parser.add_argument("--lab", action="store_true",
    help="Indicate whether the run is lab run.")
parser.add_argument("--logger_level", default="warning",
    choices=["info", "warning", "error"],
    help="Specify the logger level")
parser.add_argument("--remote", action="store_true",
    help="Submit the job to remote devices to run the benchmark.")
parser.add_argument("--root_model_dir", required=True,
    help="The root model directory if the meta data of the model uses "
    "relative directory, i.e. the location field starts with //")
parser.add_argument("--token",
    help="The token you use to upload/download your file for everstore")
parser.add_argument("-c", "--custom_binary",
    help="Specify the custom binary that you want to run.")
parser.add_argument("--pre_built_binary",
    help="Specify the pre_built_binary to bypass the building process.")
parser.add_argument("--user_string",
    help="If set, use this instead of the $USER env variable as the user string.")
class LabDriver(object):
    """Top-level dispatcher for a benchmark run.

    Depending on the parsed flags it re-assembles a command line and hands
    it to one of four apps: RunRemote (--remote), RunLab (--lab),
    BenchmarkDriver (--custom_binary / --pre_built_binary) or the
    open-source RepoDriver (default).
    """
    def __init__(self, raw_args=None):
        # Known flags land in self.args; unrecognised tokens are kept
        # verbatim in self.unknowns and forwarded to the chosen app.
        self.args, self.unknowns = parser.parse_known_args(raw_args)
        setLoggerLevel(self.args.logger_level)
    def run(self):
        """Build the argument list for the selected app and run it."""
        if not self.args.lab and not self.args.remote:
            assert self.args.benchmark_file, \
                "--benchmark_file (-b) must be specified"
        if self.args.benchmark_file:
            # Fetch any model files the benchmark references before running.
            getLogger().info("Checking benchmark files to download")
            dbench = DownloadBenchmarks(self.args,
                                        getLogger())
            dbench.run(self.args.benchmark_file)
        if self.args.remote:
            # Submit the job to remote devices.
            unique_args = [
                "--app_id", self.args.app_id,
                "--token", self.args.token,
            ]
            if self.args.benchmark_file:
                unique_args.extend([
                    "--benchmark_file", self.args.benchmark_file,
                ])
            if self.args.pre_built_binary:
                unique_args.extend([
                    "--pre_built_binary", self.args.pre_built_binary,
                ])
            if self.args.user_string:
                unique_args.extend([
                    "--user_string", self.args.user_string,
                ])
            # hack to remove --repo from the argument list since python2
            # argparse doesn't support allow_abbrev to be False, and it is
            # the prefix of --repo_dir
            if '--repo' in self.unknowns:
                index = self.unknowns.index('--repo')
                new_unknowns = self.unknowns[:index]
                new_unknowns.extend(self.unknowns[index + 2:])
                self.unknowns = new_unknowns
            app_class = RunRemote
        elif self.args.lab:
            # Lab-managed run.
            unique_args = [
                "--app_id", self.args.app_id,
                "--token", self.args.token,
            ]
            app_class = RunLab
        elif self.args.custom_binary or self.args.pre_built_binary:
            # Run a user-supplied binary directly, bypassing the build step.
            if self.args.custom_binary:
                binary = self.args.custom_binary
            else:
                binary = self.args.pre_built_binary
            # Minimal repo info: a synthetic "treatment" entry for the binary.
            repo_info = {
                "treatment": {
                    "program": binary, "commit": "-1", "commit_time": 0
                }
            }
            unique_args = [
                "--info \'", json.dumps(repo_info) + '\'',
                "--benchmark_file", self.args.benchmark_file,
            ]
            app_class = BenchmarkDriver
        else:
            # Default: drive the open-source repo workflow.
            if self.args.user_string:
                usr_string = self.args.user_string
            else:
                usr_string = os.environ["USER"]
            unique_args = [
                "--benchmark_file", self.args.benchmark_file,
                "--user_string", usr_string,
            ]
            app_class = OSS_RepoDriver
        # Arguments common to every app, plus the untouched unknowns.
        raw_args = []
        raw_args.extend(unique_args)
        raw_args.extend(["--root_model_dir", self.args.root_model_dir])
        raw_args.extend(["--logger_level", self.args.logger_level])
        raw_args.extend(self.unknowns)
        getLogger().info("Running {} with raw_args {}".format(app_class, raw_args))
        app = app_class(raw_args=raw_args)
        app.run()
if __name__ == "__main__":
    # Entry point: parse sys.argv (raw_args=None) and dispatch the run.
    driver = LabDriver(raw_args=None)
    driver.run()
| [
"[email protected]"
] | |
9b1cc198ba049ed2a1e88ee56531681e0b4e438a | f4aec883b8073c4139046590d03907a751db6ab8 | /tests/snippets/pipeline/pipeline.py | 3d18442921f639077263c258ec8797f616f848ce | [] | no_license | turnhq/nucling | 1699d2a19154c4332c9836eace03ee21ae72ed41 | 56426954c6ca48e4f6d5314f9a7807dac986bce9 | refs/heads/master | 2020-03-28T06:56:30.360598 | 2019-04-10T21:10:33 | 2019-04-10T21:10:33 | 147,871,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,429 | py | import unittest
from nucling.snippet.pipelines import (
Pipeline, Pipeline_manager, Transform_keys_camel_case_to_snake,
Remove_nones,
)
class Pipeline_with_FUN( Pipeline ):
def FUN( p, x ):
return x + 15
class Test_pipeline( unittest.TestCase ):
def setUp( self ):
self.p = Pipeline()
self.q = Pipeline()
def test_when_the_class_dont_have_fun_should_raise_no_implemented( self ):
with self.assertRaises( NotImplementedError ):
Pipeline().process( {} )
def test_when_the_instance_is_assing_fun_should_run_the_function( self ):
result = Pipeline( fun=lambda x: x + 10 ).process( 10 )
self.assertEqual( result, 20 )
def test_when_the_pipiline_have_FUN_should_run_the_function( self ):
result = Pipeline_with_FUN().process( 40 )
self.assertEqual( result, 55 )
def test_when_combine_with_another_thing_should_return_a_manaager( self ):
result = self.p | self.q
self.assertIsInstance( result, Pipeline_manager )
def test_the_new_manager_should_contain_the_pipeline_and_the_other( self ):
result = self.p | self.q
self.assertIs( result.children[0], self.p )
self.assertIs( result.children[1], self.q )
def test_do_or_to_the_class_should_be_a_manager_with_both_class( self ):
result = Pipeline | Pipeline
self.assertIsInstance( result, Pipeline_manager )
self.assertIsInstance( result.children[0], type )
self.assertIsInstance( result.children[1], type )
class Test_camel_case( unittest.TestCase ):
def setUp( self ):
self.prev_dict = { 'HelloWorld': 'hello_world' }
self.result_dict = { 'hello_world': 'hello_world' }
def test_transform_key_to_camel_to_sanke_should_transform_the_keys( self ):
result = Transform_keys_camel_case_to_snake().process( self.prev_dict )
self.assertDictEqual( result, self.result_dict )
class Test_remove_nones( unittest.TestCase ):
def setUp( self ):
self.prev_dict = { 'nones': None, 'hello_world': 'hello_world' }
self.result_dict = { 'hello_world': 'hello_world' }
def test_remove_nones_should_no_return_a_none( self ):
result = Remove_nones().process(
{ 'day': None, 'month': None, 'year': '100' } )
result = Remove_nones().process( self.prev_dict )
self.assertDictEqual( result, self.result_dict )
| [
"[email protected]"
] | |
2cc6fe236c84cda705a1b8fec0493df1b53fd497 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03026/s973955145.py | a2924f6209bebcf3663a5e647cd0aaf7dd7eaa40 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | #実験用の写経
N = int(input())
g = {i: [] for i in range(N)}
for i in range(N - 1):
a, b = map(int, input().split())
g[a - 1].append(b - 1)
g[b - 1].append(a - 1)
c = list(map(int, input().split()))
c.sort()
print(sum(c[:-1]))
nums = [0] * N
stack = [0]
while stack:
d = stack.pop()
nums[d] = c.pop()
for node in g[d]:
if nums[node] == 0:
stack.append(node)
print(' '.join(map(str, nums)))
| [
"[email protected]"
] | |
01aba594e0438ffdd0367eefacb37bc81bbda437 | ff91e5f5815b97317f952038e19af5208ef12d84 | /square2.py | 98eb0412527458a11fcc5211790ef83d9f7ee25a | [] | no_license | BryanPachas-lpsr/class-samples | c119c7c1280ca2a86f24230d85f6c712f18d9be8 | a194201dce28299bd522295110814c045927ef5b | refs/heads/master | 2021-01-17T07:03:42.968452 | 2016-06-12T22:35:54 | 2016-06-12T22:35:54 | 48,007,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #square.py
import turtle
#make out turtle
buzz = turtle.Turtle()
#buzz makes a square
lines = 0
while lines < 4:
buzz.forward(150)
buzz.left(90)
lines = lines + 1
turtle.exitonclick()
| [
"lps@lps-1011PX.(none)"
] | lps@lps-1011PX.(none) |
23603c3747093f0f01f514546c24ce3bad2ff880 | fe6f6d11dde2a3205ae9758c7d4eb1f824b84102 | /venv/lib/python2.7/site-packages/logilab/common/test/unittest_ureports_html.py | c849c4f82d85d7321cc94b30f3be83ecd578cec2 | [
"MIT"
] | permissive | mutaihillary/mycalculator | ebf12a5ac90cb97c268b05606c675d64e7ccf8a6 | 55685dd7c968861f18ae0701129f5af2bc682d67 | refs/heads/master | 2023-01-10T14:56:11.780045 | 2016-09-20T12:30:21 | 2016-09-20T12:30:21 | 68,580,251 | 0 | 0 | MIT | 2022-12-26T20:15:21 | 2016-09-19T07:27:48 | Python | UTF-8 | Python | false | false | 2,918 | py | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
'''unit tests for ureports.html_writer
'''
__revision__ = "$Id: unittest_ureports_html.py,v 1.3 2005-05-27 12:27:08 syt Exp $"
from utils import WriterTC
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.ureports.html_writer import *
class HTMLWriterTC(TestCase, WriterTC):
def setUp(self):
self.writer = HTMLWriter(1)
# Section tests ###########################################################
section_base = '''<div>
<h1>Section title</h1>
<p>Section\'s description.
Blabla bla</p></div>
'''
section_nested = '''<div>\n<h1>Section title</h1>\n<p>Section\'s description.\nBlabla bla</p><div>\n<h2>Subsection</h2>\n<p>Sub section description</p></div>\n</div>\n'''
# List tests ##############################################################
list_base = '''<ul>\n<li>item1</li>\n<li>item2</li>\n<li>item3</li>\n<li>item4</li>\n</ul>\n'''
nested_list = '''<ul>
<li><p>blabla<ul>
<li>1</li>
<li>2</li>
<li>3</li>
</ul>
</p></li>
<li>an other point</li>
</ul>
'''
# Table tests #############################################################
table_base = '''<table>\n<tr class="odd">\n<td>head1</td>\n<td>head2</td>\n</tr>\n<tr class="even">\n<td>cell1</td>\n<td>cell2</td>\n</tr>\n</table>\n'''
field_table = '''<table class="field" id="mytable">\n<tr class="odd">\n<td>f1</td>\n<td>v1</td>\n</tr>\n<tr class="even">\n<td>f22</td>\n<td>v22</td>\n</tr>\n<tr class="odd">\n<td>f333</td>\n<td>v333</td>\n</tr>\n</table>\n'''
advanced_table = '''<table class="whatever" id="mytable">\n<tr class="header">\n<th>field</th>\n<th>value</th>\n</tr>\n<tr class="even">\n<td>f1</td>\n<td>v1</td>\n</tr>\n<tr class="odd">\n<td>f22</td>\n<td>v22</td>\n</tr>\n<tr class="even">\n<td>f333</td>\n<td>v333</td>\n</tr>\n<tr class="odd">\n<td> <a href="http://www.perdu.com">toi perdu ?</a></td>\n<td> </td>\n</tr>\n</table>\n'''
# VerbatimText tests ######################################################
verbatim_base = '''<pre>blablabla</pre>'''
if __name__ == '__main__':
unittest_main()
| [
"[email protected]"
] | |
f7ecb98c52d86587f015570263ac5a20bdfbe240 | 0567fcd808397a7024b5009cc290de1c414eff06 | /src/1658.minimum-operations-to-reduce-x-to-zero.py | 7f3176eb03955d6bbc0e2d39d5a8afa61e2fd290 | [] | no_license | tientheshy/leetcode-solutions | d3897035a7fd453b9f47647e95f0f92a03bff4f3 | 218a8a97e3926788bb6320dda889bd379083570a | refs/heads/master | 2023-08-23T17:06:52.538337 | 2021-10-03T01:47:50 | 2021-10-03T01:47:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | #
# @lc app=leetcode id=1658 lang=python3
#
# [1658] Minimum Operations to Reduce X to Zero
#
# @lc code=start
# TAGS: Greedy, Sliding Window
class Solution:
# LTE. Time and Space O(N^2).
def minOperations(self, nums: List[int], x: int) -> int:
q = [(x, 0, len(nums) - 1)]
visited = {}
depth = 0
while q:
cur = []
for x, left, right in q:
if x == 0: return depth
if (left, right) in visited and visited[(left, right)] <= depth: continue
visited[(left, right)] = depth
if x < 0 or left > right:
continue
cur.append((x - nums[left], left + 1, right))
cur.append((x - nums[right], left, right - 1))
depth += 1
q = cur
return -1
# Think in reverse, instead of finding the minmum prefix + suffix, we can find the subarray with maximum length
def minOperations(self, nums: List[int], x: int) -> int:
prefix_sum = [0]
for num in nums:
prefix_sum.append(prefix_sum[-1] + num)
y = prefix_sum[-1] - x
ans = -1
visited = {}
for i, num in enumerate(prefix_sum):
if y + num not in visited:
visited[y + num] = i
if num in visited:
ans = max(ans, i - visited[num])
if ans == -1: return -1
return len(nums) - ans
# @lc code=end
| [
"[email protected]"
] | |
ffcb5dfd61b8ea8406307f4d49316125cb08366c | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/agc006/B/4249708.py | c209881efba18f1680c0d7282393670ecf313f49 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | N,x = map(int,input().split())
if x in (1, 2 * N - 1):
print('No')
exit()
print('Yes')
if N == 2 and x == 2:
l = [1, 2, 3]
print(*l, sep='\n')
elif x == 2:
l = [4, 1, 2, 3]
rest = list(range(5, 2 * N))
m = len(rest) // 2
l = rest[:m] + l + rest[m:]
print(*l, sep='\n')
elif x == 2 * N - 2:
l = [x - 2, x + 1, x, x - 1]
rest = list(range(1, 2 * N - 4))
m = len(rest) // 2
l = rest[:m] + l + rest[m:]
print(*l, sep='\n')
else:
l = [x + 2, x - 1, x, x + 1, x - 2]
rest = list(range(1, x - 2)) + list(range(x + 3, 2 * N))
m = len(rest) // 2
l = rest[:m] + l + rest[m:]
print(*l, sep='\n') | [
"[email protected]"
] | |
4be4119618f24eb4a854b957e68ff64726717d61 | c27a95964b2740e1ec681b7068f52fb573d90321 | /aliyun-python-sdk-cms/aliyunsdkcms/request/v20180308/QueryMetricListRequest.py | 56216712133c7d35673a04cf20349e748613f843 | [
"Apache-2.0"
] | permissive | mysshget/aliyun-openapi-python-sdk | 5cf0a0277cce9823966e93b875c23231d8b32c8a | 333cdd97c894fea6570983e97d2f6236841bc7d3 | refs/heads/master | 2020-03-17T23:07:02.942583 | 2018-05-17T09:50:53 | 2018-05-17T09:50:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class QueryMetricListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2018-03-08', 'QueryMetricList','cms')
def get_Cursor(self):
return self.get_query_params().get('Cursor')
def set_Cursor(self,Cursor):
self.add_query_param('Cursor',Cursor)
def get_callby_cms_owner(self):
return self.get_query_params().get('callby_cms_owner')
def set_callby_cms_owner(self,callby_cms_owner):
self.add_query_param('callby_cms_owner',callby_cms_owner)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_Length(self):
return self.get_query_params().get('Length')
def set_Length(self,Length):
self.add_query_param('Length',Length)
def get_Project(self):
return self.get_query_params().get('Project')
def set_Project(self,Project):
self.add_query_param('Project',Project)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_Express(self):
return self.get_query_params().get('Express')
def set_Express(self,Express):
self.add_query_param('Express',Express)
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_Metric(self):
return self.get_query_params().get('Metric')
def set_Metric(self,Metric):
self.add_query_param('Metric',Metric)
def get_Page(self):
return self.get_query_params().get('Page')
def set_Page(self,Page):
self.add_query_param('Page',Page)
def get_Dimensions(self):
return self.get_query_params().get('Dimensions')
def set_Dimensions(self,Dimensions):
self.add_query_param('Dimensions',Dimensions) | [
"[email protected]"
] | |
5b3a2e285dac25d8fbaf09b7b6ce6bb8623be7d1 | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /training/old/detr/eval_step.py | 14c769d423b9428725a45145e5fecae4336afb35 | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | import torch
import Utils.detr_misc as utils
from evaluation.evaluator.coco import CocoEvaluator
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Test:'
iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
coco_evaluator = CocoEvaluator(base_ds, iou_types)
# coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
for samples, targets in metric_logger.log_every(data_loader, 10, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
**loss_dict_reduced_scaled,
**loss_dict_reduced_unscaled)
metric_logger.update(class_error=loss_dict_reduced['class_error'])
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = postprocessors['bbox'](outputs, orig_target_sizes)
if 'segm' in postprocessors.keys():
target_sizes = torch.stack([t["size"] for t in targets], dim=0)
results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
res = {target['image_id'].item(): output for target, output in zip(targets, results)}
if coco_evaluator is not None:
coco_evaluator.update(res)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
if coco_evaluator is not None:
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
if coco_evaluator is not None:
coco_evaluator.accumulate()
coco_evaluator.summarize()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
if coco_evaluator is not None:
if 'bbox' in postprocessors.keys():
stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
if 'segm' in postprocessors.keys():
stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
return stats, coco_evaluator
| [
"[email protected]"
] | |
4fee8226361947afb1ef025ada908dc3ad5f97a7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2526/48083/309473.py | a8d534b2af549d2506c477c229047c81420f23b7 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:
res = []
def inOrder(root):
if root:
inOrder(root.left)
res.append(root.val)
inOrder(root.right)
inOrder(root1)
inOrder(root2)
res = filter(None, res) #
return sorted(map(int,res))
def str2arr(self,t):
t = t[1:-1]
t = t.split(',')
return t
def creatTree(self,arr):
nodes = []
for a in arr:
node = TreeNode(a)
nodes.append(node)
parentNum = len(arr) // 2 - 1
for i in range(parentNum+1):
leftIndex = 2 * i + 1
rightIndex = 2 * i + 2
if nodes[leftIndex].val!='null':
nodes[i].left = nodes[leftIndex]
if rightIndex < len(arr) and nodes[rightIndex].val!='null':
nodes[i].right = nodes[rightIndex]
return nodes[0]
s = Solution()
t1 = input()
t2 = input()
t1 = s.str2arr(t1)
t2 = s.str2arr(t2)
root1 = s.creatTree(t1)
root2 = s.creatTree(t2)
res = s.getAllElements(root1, root2)
print(res) | [
"[email protected]"
] | |
beb51d68a2bfda9d9043f37aca7dfab32345ec5d | 0c66e605e6e4129b09ea14dbb6aa353d18aaa027 | /diventi/blog/migrations/0011_auto_20200502_1924.py | cafdf11a5790066124e2ac11c41e6c0b8e07572d | [
"Apache-2.0"
] | permissive | flavoi/diventi | 58fbc8c947f387cbcc1ce607878a59a6f2b72313 | c0b1efe2baa3ff816d6ee9a8e86623f297973ded | refs/heads/master | 2023-07-20T09:32:35.897661 | 2023-07-11T19:44:26 | 2023-07-11T19:44:26 | 102,959,477 | 2 | 1 | Apache-2.0 | 2023-02-08T01:03:17 | 2017-09-09T14:10:51 | Python | UTF-8 | Python | false | false | 612 | py | # Generated by Django 2.2.12 on 2020-05-02 17:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0010_auto_20200229_1600'),
]
operations = [
migrations.AlterField(
model_name='blogcover',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Blue'), ('primary', 'Rose'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('default', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='warning', max_length=30, verbose_name='color'),
),
]
| [
"[email protected]"
] | |
2c8b29f4777567834b9d0affa686caba95f48ef3 | d1c29c9f06d56644ca2fb11fcff8c25703aced79 | /MMCG/make_plots.py | 891af441059f2c0d34f6177672eb9d172bde2fe6 | [] | no_license | jjhelmus/arm_vap_scripts | 4a3d7bbe9e277972312484fe46a35c92dae1c71c | 1d49d0e2f8affea11aabc000f74d8d1c4be75ef5 | refs/heads/master | 2021-01-22T05:24:35.935447 | 2013-04-12T14:38:31 | 2013-04-12T14:38:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | #!/usr/bin/env python
import matplotlib.pyplot as plt
import netCDF4
import pyart
# MMCG figure
dataset = netCDF4.Dataset('sgpcsaprmmcgi7.c0.20110520.110100.nc')
refl = dataset.variables['reflectivity_horizontal']
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(refl[0,4], origin='lower')
fig.savefig('mapped_figure.png')
# Test
dataset = netCDF4.Dataset('foo.dir/sgpcsaprmmcgI7.c0.20110520.110100.nc')
refl = dataset.variables['reflectivity_horizontal']
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(refl[0,4], origin='lower')
fig.savefig('exp_figure.png')
# Radial coords
"""
radar = pyart.io.read_netcdf('sgpcsaprsurcmacI7.c0.20110520.110100.nc')
display = pyart.graph.RadarDisplay(radar)
fig = plt.figure()
ax = fig.add_subplot(111)
display.plot_ppi('reflectivity_horizontal', 0, vmin=-16, vmax=48)
fig.savefig('radial_figure.png')
"""
| [
"[email protected]"
] | |
0396eb254b1de5fa42497fb6a7b869393ca51085 | 29c58b3bec6ac0fcdb3070efc118600ee92004da | /test/test_unread_count.py | 42aeede4b08156288cd84090b1b6d8c211d1374e | [
"MIT"
] | permissive | mailslurp/mailslurp-client-python | a2b5a0545206714bd4462ae517f242852b52aaf9 | 5c9a7cfdd5ea8bf671928023e7263847353d92c4 | refs/heads/master | 2023-06-23T00:41:36.257212 | 2023-06-14T10:10:14 | 2023-06-14T10:10:14 | 204,662,133 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | # coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import mailslurp_client
from mailslurp_client.models.unread_count import UnreadCount # noqa: E501
from mailslurp_client.rest import ApiException
class TestUnreadCount(unittest.TestCase):
"""UnreadCount unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test UnreadCount
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = mailslurp_client.models.unread_count.UnreadCount() # noqa: E501
if include_optional :
return UnreadCount(
count = 56
)
else :
return UnreadCount(
count = 56,
)
def testUnreadCount(self):
"""Test UnreadCount"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8979b51513153b74eb4a26efe75e95c67319ebef | 40b42ccf2b6959d6fce74509201781be96f04475 | /mmocr/models/textdet/necks/fpem_ffm.py | c98b43f1fc2642db598a0f9094b88e4851cc9e75 | [
"Apache-2.0"
] | permissive | xdxie/WordArt | 2f1414d8e4edaa89333353d0b28e5096e1f87263 | 89bf8a218881b250d0ead7a0287526c69586c92a | refs/heads/main | 2023-05-23T02:04:22.185386 | 2023-03-06T11:51:43 | 2023-03-06T11:51:43 | 515,485,694 | 106 | 12 | null | null | null | null | UTF-8 | Python | false | false | 5,999 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.runner import BaseModule, ModuleList
from torch import nn
from mmocr.models.builder import NECKS
class FPEM(BaseModule):
"""FPN-like feature fusion module in PANet.
Args:
in_channels (int): Number of input channels.
init_cfg (dict or list[dict], optional): Initialization configs.
"""
def __init__(self, in_channels=128, init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.up_add1 = SeparableConv2d(in_channels, in_channels, 1)
self.up_add2 = SeparableConv2d(in_channels, in_channels, 1)
self.up_add3 = SeparableConv2d(in_channels, in_channels, 1)
self.down_add1 = SeparableConv2d(in_channels, in_channels, 2)
self.down_add2 = SeparableConv2d(in_channels, in_channels, 2)
self.down_add3 = SeparableConv2d(in_channels, in_channels, 2)
def forward(self, c2, c3, c4, c5):
"""
Args:
c2, c3, c4, c5 (Tensor): Each has the shape of
:math:`(N, C_i, H_i, W_i)`.
Returns:
list[Tensor]: A list of 4 tensors of the same shape as input.
"""
# upsample
c4 = self.up_add1(self._upsample_add(c5, c4)) # c4 shape
c3 = self.up_add2(self._upsample_add(c4, c3))
c2 = self.up_add3(self._upsample_add(c3, c2))
# downsample
c3 = self.down_add1(self._upsample_add(c3, c2))
c4 = self.down_add2(self._upsample_add(c4, c3))
c5 = self.down_add3(self._upsample_add(c5, c4)) # c4 / 2
return c2, c3, c4, c5
def _upsample_add(self, x, y):
return F.interpolate(x, size=y.size()[2:]) + y
class SeparableConv2d(BaseModule):
def __init__(self, in_channels, out_channels, stride=1, init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.depthwise_conv = nn.Conv2d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=3,
padding=1,
stride=stride,
groups=in_channels)
self.pointwise_conv = nn.Conv2d(
in_channels=in_channels, out_channels=out_channels, kernel_size=1)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
x = self.bn(x)
x = self.relu(x)
return x
@NECKS.register_module()
class FPEM_FFM(BaseModule):
"""This code is from https://github.com/WenmuZhou/PAN.pytorch.
Args:
in_channels (list[int]): A list of 4 numbers of input channels.
conv_out (int): Number of output channels.
fpem_repeat (int): Number of FPEM layers before FFM operations.
align_corners (bool): The interpolation behaviour in FFM operation,
used in :func:`torch.nn.functional.interpolate`.
init_cfg (dict or list[dict], optional): Initialization configs.
"""
def __init__(self,
in_channels,
conv_out=128,
fpem_repeat=2,
align_corners=False,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super().__init__(init_cfg=init_cfg)
# reduce layers
self.reduce_conv_c2 = nn.Sequential(
nn.Conv2d(
in_channels=in_channels[0],
out_channels=conv_out,
kernel_size=1), nn.BatchNorm2d(conv_out), nn.ReLU())
self.reduce_conv_c3 = nn.Sequential(
nn.Conv2d(
in_channels=in_channels[1],
out_channels=conv_out,
kernel_size=1), nn.BatchNorm2d(conv_out), nn.ReLU())
self.reduce_conv_c4 = nn.Sequential(
nn.Conv2d(
in_channels=in_channels[2],
out_channels=conv_out,
kernel_size=1), nn.BatchNorm2d(conv_out), nn.ReLU())
self.reduce_conv_c5 = nn.Sequential(
nn.Conv2d(
in_channels=in_channels[3],
out_channels=conv_out,
kernel_size=1), nn.BatchNorm2d(conv_out), nn.ReLU())
self.align_corners = align_corners
self.fpems = ModuleList()
for _ in range(fpem_repeat):
self.fpems.append(FPEM(conv_out))
def forward(self, x):
"""
Args:
x (list[Tensor]): A list of four tensors of shape
:math:`(N, C_i, H_i, W_i)`, representing C2, C3, C4, C5
features respectively. :math:`C_i` should matches the number in
``in_channels``.
Returns:
list[Tensor]: Four tensors of shape
:math:`(N, C_{out}, H_0, W_0)` where :math:`C_{out}` is
``conv_out``.
"""
c2, c3, c4, c5 = x
# reduce channel
c2 = self.reduce_conv_c2(c2)
c3 = self.reduce_conv_c3(c3)
c4 = self.reduce_conv_c4(c4)
c5 = self.reduce_conv_c5(c5)
# FPEM
for i, fpem in enumerate(self.fpems):
c2, c3, c4, c5 = fpem(c2, c3, c4, c5)
if i == 0:
c2_ffm = c2
c3_ffm = c3
c4_ffm = c4
c5_ffm = c5
else:
c2_ffm = c2_ffm + c2
c3_ffm = c3_ffm + c3
c4_ffm = c4_ffm + c4
c5_ffm = c5_ffm + c5
# FFM
c5 = F.interpolate(
c5_ffm,
c2_ffm.size()[-2:],
mode='bilinear',
align_corners=self.align_corners)
c4 = F.interpolate(
c4_ffm,
c2_ffm.size()[-2:],
mode='bilinear',
align_corners=self.align_corners)
c3 = F.interpolate(
c3_ffm,
c2_ffm.size()[-2:],
mode='bilinear',
align_corners=self.align_corners)
outs = [c2_ffm, c3, c4, c5]
return tuple(outs)
| [
"[email protected]"
] | |
3652511f4a3c2e9b77748a3cf8132b152949bf44 | ffe4c155e228f1d3bcb3ff35265bb727c684ec1a | /Codes/Quiz/number_of_factors.py | 68ce727aa40589c018b92480a89b9de9e4e47ed7 | [] | no_license | yuuee-www/Python-Learning | 848407aba39970e7e0058a4adb09dd35818c1d54 | 2964c9144844aed576ea527acedf1a465e9a8664 | refs/heads/master | 2023-03-12T00:55:06.034328 | 2021-02-28T13:43:14 | 2021-02-28T13:43:14 | 339,406,816 | 0 | 0 | null | 2021-02-28T11:27:40 | 2021-02-16T13:26:46 | Jupyter Notebook | UTF-8 | Python | false | false | 269 | py | def numberOfFactors(num):
ans = 1
x = 2
while x * x <= num:
cnt = 1
while num % x == 0:
cnt += 1
num /= x
ans = cnt
x += 1
return ans * (1 + (num > 1))
n = int(input())
print(numberOfFactors(n))
| [
"[email protected]"
] | |
23b8ea48b8dcdfd520fd983a55990ac4992ded00 | 4017add8fa767cf2eca9163791aa65ee77c67a07 | /code/gradient_descent/first.py | 2a722d65bc85c82cd02c686f9e1e382f1907852a | [] | no_license | ducksfrogs/numpy_data_ana | 00c0928f2ddc7a8ad0ea9ecdefa3815a8d880969 | 9d89bc377a3015c19c74f6b5aa500f2f2f08cdb1 | refs/heads/master | 2022-10-19T22:37:10.314453 | 2020-06-09T00:05:23 | 2020-06-09T00:05:23 | 268,383,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
dataset = load_boston()
samples, label, feature_names = dataset.data, dataset.target, dataset.feature_names
bostondf = pd.DataFrame(dataset.data)
bostondf.columns = dataset.feature_names
bostondf['Target price'] = dataset.target
bostondf.head()
bostondf.plot(x='RM', y='Target price', style='o')
def prediction(X, coefficient, intercept):
return X*coefficient + intercept
def cost_function(X, Y, coefficient, intercept):
MSE = 0.0
for i in range(len(X)):
MSE += (Y[i] -(coefficient*X[i] + intercept))**2
return MSE / len(X)
def update_weights(X, Y, coefficient, intercept, learning_rate):
coefficient_derivative = 0
intercept_derivative = 0
for i in range(len(X)):
coefficient_derivative += -2*X[i] *(Y[i] -(coefficient * X[i] + intercept))
intercept_derivative += -2*(Y[i] - (coefficient* X[i] + intercept))
coefficient -= (coefficient_derivative / len(X)) * learning_rate
intercept -= (intercept_derivative / len(X)) * learning_rate
return coefficient, intercept
def train(X, Y, coefficient, intercept, learning_rate, iteration):
cost_hist = []
for i in range(iteration):
coefficient, intercept = update_weights(X, Y, coefficient, intercept, learning_rate)
cost = cost_function(X, Y, coefficient, intercept)
cost_hist.append(cost)
return coefficient, intercept, cost_hist
learning_rate = 0.01
iteration = 10001
coefficient = 0.3
intercept = 2
X = bostondf.iloc[:, 5:6].values
Y = bostondf.iloc[:, 13:14].values
# coefficient, intercept, cost_history = train(X, Y, coefficient, intercept, learning_rate, iteration)
coefficient, intercept, cost_history = train(X, Y, coefficient, intercept=2, learning_rate=0.01, iteration=10001)
y_hat = X*coefficient + intercept
plt.plot(X, Y, 'bo')
plt.plot(X, y_hat)
plt.show()
| [
"[email protected]"
] | |
fda009d969b4c11b4518f554302e60e88490b46b | 0f09759025db447fe63b3af0af80c3e31e2a887f | /scripts/cell/taskScripts/Bangzhushenmiren.py | 06b8d8ab06630b18f47c4ebd930e3d56d5de5726 | [] | no_license | jevonhuang/huanhuoserver | d7db1cd4c67d8be2da4dc9ec84ef8f23e891c537 | caa8a87cd303b4d0368a0a6397fc1d47685c3bc3 | refs/heads/master | 2020-12-07T16:47:40.668507 | 2018-04-02T10:12:01 | 2018-04-02T10:12:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # -*- coding: utf-8 -*-
import KBEngine
from KBEDebug import *
class Bangzhushenmiren(object):
def __init__(self, owner, selfIndex, npcName, npcTaskIndex):
DEBUG_MSG("Bangzhushenmiren:__init__")
self.owner = owner
self.selfIndex = selfIndex
self.npcName = npcName
self.npcTaskIndex = npcTaskIndex
self.owner.setAttr("Bangzhushenmiren_TaskCounter", 1)
self.oldTaskCounter = self.owner.getAttr("Bangzhushenmiren_TaskCounter")
def detectTaskCompleteness(self):
self.owner.setAttr("Bangzhushenmiren_TaskCounter", 0)
if self.owner.getAttr("Bangzhushenmiren_TaskCounter") == 0:
self.owner.setTaskFinish(self.npcName, self.npcTaskIndex, self.selfIndex)
| [
"[email protected]"
] | |
f434d074c2a942412002f5c9efc9a15c033dacc0 | 5472a3f913e1a6698b9dab902545f0ba02e7a02e | /pbay_url.py | 30c40f18b360964362158d06ed0107620e90d399 | [] | no_license | Arrowheadahp/piratebay-search-and-download | bf38956588ce6da8caf25cec653bec76409cfd79 | 0fe8db913215e4a0b00a9153e7085728e7d3ecf7 | refs/heads/master | 2020-05-31T05:56:18.592671 | 2019-07-20T06:15:26 | 2019-07-20T06:15:26 | 190,131,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import webbrowser
def soupcreate(url):
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
#print ('url page read')
return(BeautifulSoup(webpage,features="lxml"))
def geturl():
proxylist=soupcreate('https://piratebay-proxylist.se/')
proxy=proxylist.find('td',{'class':'url'})
proxyurl=proxy.get('data-href')
return (proxyurl)
if __name__=='__main__':
print (geturl())
webbrowser.open(geturl())
| [
"[email protected]"
] | |
8d704be2ad0bccea7611b5a9eac75d47a7e74899 | f448b9635d076d88a4439e937eec7dd050cc316a | /xx.py | 6a1bcaeeb2767fb3a0468cbdf1fb2786afa1066f | [] | no_license | udaytejam/practicesamples | c7e6ba2e30f52138b3b22414c57ddc1f9e94162a | acda24dfe5c3aff60b688c9b434b83a3132b0af1 | refs/heads/master | 2021-01-10T02:03:51.456102 | 2015-10-05T11:23:42 | 2015-10-05T11:23:42 | 43,500,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | globvar = 10
def read1():
print(globvar)
def write1():
global globvar
globvar = 5
def write2():
globvar = 15
read1()
write1()
read1()
write2()
read1() | [
"[email protected]"
] | |
5253d398213d1c154ea2dffba964210fbf476c74 | e33ecdb112045327344dce2ae8b0612848938f24 | /cotidia/socialshare/conf.py | 1e7d660917e60fb666ce61f86598c24b02e1edef | [
"BSD-3-Clause"
] | permissive | guillaumepiot/cotidia-social-share | 939125b97474bb34e8a94cd0fa6d6919026c029c | 9c926bb86e7f158f2b59eaddcf09eba459c009b6 | refs/heads/master | 2020-04-21T07:28:24.520846 | 2019-03-26T14:00:00 | 2019-03-26T14:00:00 | 169,393,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from django.conf import settings
from appconf import AppConf
class SocialShareConf(AppConf):
FACEBOOK_APP_ID = "[Not implemented]"
class Meta:
prefix = 'socialshare'
| [
"[email protected]"
] | |
072d371ce95370c4977fcc64b3a3e77c06ca6c30 | 5f07c38899e350b0b776510fd7d7831d44cf1404 | /drfmixins/drfmixins/settings.py | d599783f7b76ad7f17b66c1c6fd0e90c0991e475 | [] | no_license | shubham454/Django-Rest | b733f1d47ada9df452e912dcd8acad48a7ec4c75 | 3d94f57cab3537c51caa68807d5fcdf8883d2d2c | refs/heads/master | 2022-12-14T20:37:11.835794 | 2020-08-13T18:43:26 | 2020-08-13T18:43:26 | 287,354,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,133 | py | """
Django settings for drfmixins project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z&7-uzdyn7cex&u5yzfw&wh$j8_v71pu@!4rc9lu@c#8y(!_^('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'testapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'drfmixins.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drfmixins.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
9f25d416bd468bb65eb3923ab99d32b912f60ca7 | 3e85618c79a1a934fec543e1327e772ca081a5b9 | /N1949.py | 2c0945dcd3d845154cc7480e681a4eb6834ef180 | [] | no_license | ghdus4185/SWEXPERT | 72d79aa4a668452327a676a644b952bab191c79b | 4dc74ad74df7837450de4ce55526dac7760ce738 | refs/heads/master | 2020-07-16T18:31:22.153239 | 2019-12-20T04:18:30 | 2019-12-20T04:18:30 | 205,843,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | import sys
sys.stdin = open('sample_input.txt', 'r')
# 가장 높은 봉우리를 찾아야한다
# 내 주변을 선택할 때 나보다 낮은 얘들을 선택하거나 한번 깎아서 선택할 수 있다.
# 이후에 깎는게 더 유리할 수 있으므로
# 1) 낮은 칸으로 이동해보기
# 2) 높거나 같은 칸에 대해서 2가지 선택 깍는다 or 깍지않는다.
# 3) 깍아서 지나갈 수 있는 상황이라면 굳이 많이 깍지 않고 딱 나보다 작은 정도만
# 깍는다.
def f(i, j, c, e): # c : 깍는 횟수, e : 이동거리
di = [0, 1, 0, -1]
dj = [1, 0, -1, 0]
global N, K, maxV, visited, arr
if maxV < e:
maxV = e
visited[i][j] = 1 # 등산로에 포함되었음을 표시
#주변탐색
for k in range(4):
ni = i + di[k]
nj = j + dj[k]
if ni >= 0 and ni < N and nj >= 0 and nj< N: # 유효좌표인지 확인
if arr[i][j] > arr[ni][nj]:
f(ni, nj, c, e+1) # 주변의 낮은 점으로 이동
elif visited[ni][nj] == 0 and c > 0 and arr[i][j] > arr[ni][nj]-K:
# 주변 점을 깍아서 이동
org = arr[ni][nj] # 원래 높이 저장
arr[ni][nj] = arr[i][j] -1 # 주변 점을 깍아서 이동
f(ni, nj, 0, e+1)
arr[ni][nj] = org # 높이 원상 복구
# 돌아왔을 때를 생각해서 깍기 전 높이를 저장해둔다
visited[i][j] = 0 # 다른 경로의 등산로에 포함될 수 있으므로
return
T = int(input())
for tc in range(T):
N, K = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(N)]
visited = [[0]*N for _ in range(N)]
h = 0
for i in range(N):
for j in range(N):
if h < arr[i][j]:
h = arr[i][j]
maxV = 0
for i in range(N):
for j in range(N):
if arr[i][j] == h:
f(i, j, 1, 1)
print('#{} {}'.format(tc+1, maxV)) | [
"[email protected]"
] | |
b1532dca490f5b992fcd2d4448901b761f3b2807 | 025dc1fa797b0de25b556365d23bddb848ab8ce0 | /colossus/apps/lists/mixins.py | ec6726113587e1a0aef7d4b9d7aedb437406729a | [
"MIT"
] | permissive | ramanaditya/colossus | eab49ec33031b8542b07e3aaebc36467a97786d6 | 11b34a216b2021a5da79cd6e347aef842f7b0c72 | refs/heads/master | 2023-03-30T12:39:12.948490 | 2021-03-25T17:11:32 | 2021-03-25T17:11:32 | 340,977,981 | 1 | 0 | MIT | 2021-03-25T16:34:54 | 2021-02-21T18:51:05 | Python | UTF-8 | Python | false | false | 1,295 | py | from django.http import Http404
from django.shortcuts import get_object_or_404
from django.views.generic.base import ContextMixin
from colossus.apps.subscribers.constants import TemplateKeys
from colossus.apps.subscribers.models import SubscriptionFormTemplate
from .models import MailingList
class MailingListMixin(ContextMixin):
__mailing_list = None
@property
def mailing_list(self):
if self.__mailing_list is None:
self.__mailing_list = get_object_or_404(MailingList, pk=self.kwargs.get('pk'))
return self.__mailing_list
def get_context_data(self, **kwargs):
if 'menu' not in kwargs:
kwargs['menu'] = 'lists'
if 'mailing_list' not in kwargs:
kwargs['mailing_list'] = self.mailing_list
return super().get_context_data(**kwargs)
class FormTemplateMixin:
def get_object(self):
mailing_list_id = self.kwargs.get('pk')
key = self.kwargs.get('form_key')
if key not in TemplateKeys.LABELS.keys():
raise Http404
form_template, created = SubscriptionFormTemplate.objects.get_or_create(
key=key,
mailing_list_id=mailing_list_id
)
if created:
form_template.load_defaults()
return form_template
| [
"[email protected]"
] | |
2d25948fc47ae05e17ec0c8404dc6012cc0a51f0 | f9c7969c8649c484f2460fb245a3d5bd6870fa5a | /ch07/exercises/exercise 35.py | 85def5a86980f358fd4a9a1b39f5216c13556056 | [] | no_license | Pshypher/tpocup | 78cf97d51259bfea944dc205b9644bb1ae4ab367 | b05b05728713637b1976a8203c2c97dbbfbb6a94 | refs/heads/master | 2022-05-18T13:11:31.417205 | 2020-01-07T13:50:06 | 2020-01-07T13:50:06 | 260,133,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # Unless stated otherwise, variables are assumed to be of the str data type
def reverse_string(S):
"""Return the string S in reverse order using a for loop."""
S_reverse = ""
for ch in S:
S_reverse = ch + S_reverse
return S_reverse
# Prompt user for a string
chars = input("Enter a sequence of alphanumeric chars: ")
print(reverse_string(chars))
| [
"[email protected]"
] | |
2ca7726a97e24168ecf4147fb619ac3d3540182e | d1808d8cc5138489667b7845466f9c573591d372 | /notebooks/Reproducible Papers/Syngine_2016/figure_2_source_width.py | 7eb1deaeb1cbee060358396def82df02fcfa286e | [] | no_license | krischer/seismo_live | e140777900f6246a677bc28b6e68f0a168ec41ab | fcc615aee965bc297e8d53da5692abb2ecd6fd0c | refs/heads/master | 2021-10-20T22:17:42.276096 | 2019-11-27T23:21:16 | 2019-11-28T10:44:21 | 44,953,995 | 69 | 59 | null | 2020-05-22T11:00:52 | 2015-10-26T08:00:42 | Python | UTF-8 | Python | false | false | 5,880 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + {"deletable": true, "editable": true, "cell_type": "markdown"}
# <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
# <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
# <div style="position: relative ; top: 50% ; transform: translatey(-50%)">
# <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
# <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Reproducible Papers - Syngine Paper</div>
# </div>
# </div>
# </div>
# + {"deletable": true, "editable": true, "cell_type": "markdown"}
# ---
#
# # Figure 2: Source Width Parameter
#
# This notebook is part of the supplementary materials for the Syngine paper and reproduces figure 2.
#
# Requires matplotlib >= 1.5 and an ObsPy version with the syngine client (>= 1.0) as well as instaseis.
#
# ##### Authors:
# * Lion Krischer ([@krischer](https://github.com/krischer))
# + {"deletable": true, "editable": true}
# %matplotlib inline
import obspy
import matplotlib.pyplot as plt
import numpy as np
plt.style.use("seaborn-whitegrid")
import copy
import io
import instaseis
import json
import requests
# + {"deletable": true, "editable": true}
# Base URL of the IRIS syngine web service used for all seismogram requests.
SYNGINE_URL = "http://service.iris.edu/irisws/syngine/1/query"

# + {"deletable": true, "editable": true}
network = "IU"
station = "ANMO"

# Get station information from the IRIS FDSN service.
from obspy.clients.fdsn import Client
c = Client("IRIS")
print(c.get_stations(network=network, station=station, format="text")[0][0])

# + {"deletable": true, "editable": true}
# The param file is only used to extract the source parameters. This is
# thus consistent with the other figures but can of course also be done
# differently.
filename = "chile_param.txt"

# Parse the finite source with instaseis.
finite_source = instaseis.FiniteSource.from_usgs_param_file(filename)

# Compute the centroid of it.
finite_source.compute_centroid()

# src is now the centroid of the finite source.
src = finite_source.CMT

# Common query parameters shared by every request.
params_common = {
    # IU.ANMO
    "receiverlatitude": 34.95,
    "receiverlongitude": -106.46,
    "dt": 0.1,
    "origintime": src.origin_time,
    "components": "Z",
    "model": "ak135f_2s",
    "format": "miniseed",
    "units": "velocity"}

# Parameters only needed for the point source.
params_ps = copy.deepcopy(params_common)
params_ps["sourcelatitude"] = src.latitude
params_ps["sourcelongitude"] = src.longitude
params_ps["sourcedepthinmeters"] = src.depth_in_m
params_ps["sourcemomenttensor"] = ",".join(
    str(getattr(src, _i)) for _i in ("m_rr", "m_tt", "m_pp", "m_rt", "m_rp", "m_tp"))

print(finite_source)
print(finite_source.CMT)

# + {"deletable": true, "editable": true}
import copy
import collections

# Ordered so the plot legend/offsets follow the request order.
seis = collections.OrderedDict()

source_widths = [2.5, 5, 10, 25, 50, 100]

# Request one seismogram for each source width.
for sw in source_widths:
    p = copy.deepcopy(params_ps)
    # The sourcewidth parameter steers the width of the STF.
    p["sourcewidth"] = sw
    # Send it alongside.
    r = requests.get(url=SYNGINE_URL, params=p)
    assert r.ok, str(r.reason)
    # Get the data and parse it as an ObsPy object.
    with io.BytesIO(r.content) as f:
        tr = obspy.read(f)[0]
    seis[sw] = tr
    # Plot only some phases.
    tr.slice(tr.stats.starttime + 1000, tr.stats.starttime + 1500).plot()

# + {"deletable": true, "editable": true}
import matplotlib.gridspec as gridspec

# Plotting setup: left panel (seismograms) and right panel (STFs).
fig = plt.figure(figsize=(10, 3))
gs1 = gridspec.GridSpec(1, 1, wspace=0, hspace=0, left=0.05,
                        right=0.62, bottom=0.14, top=0.99)
ax1 = fig.add_subplot(gs1[0])
gs2 = gridspec.GridSpec(1, 1, wspace=0, hspace=0, left=0.65,
                        right=0.94, bottom=0.14, top=0.99)
ax2 = fig.add_subplot(gs2[0])

plt.sca(ax1)

# Now plot all the seismograms, vertically offset by 3 units each.
for _i, (sw, tr) in enumerate(seis.items()):
    tr.normalize()
    plt.plot(tr.times(), 2.0 * tr.data - _i * 3, color="0.1")

plt.legend()
plt.xlim(0, 2000)
plt.yticks([0, -3, -6, -9, -12, -15], [str(_i) for _i in source_widths])
plt.ylim(-17, 2)
plt.xlabel("Time since event origin [sec]")
plt.ylabel("Source width [sec]")

plt.sca(ax2)

# Use an internal instaseis function to get the used STF.
from instaseis.server.util import get_gaussian_source_time_function

dt = 0.01

# Plot all the source time functions, progressively lighter fills.
for _i, sw in enumerate(source_widths):
    sr = get_gaussian_source_time_function(sw, dt)[1]
    #sr = np.concatenate([sr2, np.zeros(1000)])
    alpha = 0.4 - _i * 0.4 / len(source_widths)
    plt.fill_between(np.arange(len(sr)) * dt - sw, sr, color="0.0", alpha=alpha, linewidth=0)
    # Highlight the 25 s STF with a solid outline and an arrow label.
    if sw == 25:
        plt.plot(np.arange(len(sr)) * dt - sw, sr, color="0.0", lw=2)
        ax2.annotate('25 sec', xy=(5, 0.07), xytext=(8, 0.10),
                     arrowprops=dict(facecolor='black', shrink=0.05))

plt.grid(True)
plt.xlim(-20, 20)
plt.ylim(-0.0005, 0.16)
plt.xticks([-10, 0, 10])
plt.yticks([0, 0.04, 0.08, 0.12])
plt.xlabel("Time [sec]")
plt.ylabel("Slip rate [m/sec]")

ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.yaxis.set_tick_params(length=2)
ax2.yaxis.set_tick_params(pad=4)
ax2.xaxis.set_tick_params(length=2)
ax2.xaxis.set_tick_params(pad=4)
ax2.xaxis.set_tick_params(color="#CCCCCC")
ax2.yaxis.set_tick_params(color="#CCCCCC")

plt.savefig("source_width.pdf")
| [
"[email protected]"
] | |
6905fda86703d56d27ced0178a27ebf687bb1da0 | d18df0ec22dc766496d4b0c2dcdcc933bdf332d8 | /utils.py | f15c3122cd9f699a4a7cf4c18cdcaea62d5eff1b | [] | no_license | thanhlt998/tktdtt | edc6610a28e09482f0746db258eed5323636abaa | 64f32e62fb3b2d5d6ef6c2a0e74294bdff4b2057 | refs/heads/master | 2022-03-21T07:24:59.104986 | 2019-12-17T02:32:25 | 2019-12-17T02:32:25 | 208,956,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | from pyvi.ViTokenizer import ViTokenizer
import re
from dateutil.parser import parse
import json
def tokenize(terms):
    """Segment Vietnamese text and return every token wrapped in quotes.

    The segmenter joins multi-word tokens with underscores; those are
    converted back to spaces before quoting, and the quoted tokens are
    joined by single spaces.
    """
    segmented = ViTokenizer.tokenize(terms)
    quoted = []
    for token in re.findall(r'\S+', segmented):
        quoted.append('"{}"'.format(re.sub(r'_', ' ', token)))
    return ' '.join(quoted)
def time_str2iso_format(time_str, is_24h_format=True):
    """Extract the first timestamp-like substring from *time_str* and return
    it formatted as ``%Y-%m-%dT%H:%M:%SZ`` (Solr/ISO-8601 style).

    NOTE(review): when ``is_24h_format`` is True the regex additionally
    admits the characters A/M/P (AM/PM markers), which looks inverted for a
    24-hour format -- confirm the intended meaning of the flag.
    """
    # Match a digit followed by digits and the separators / : , - space,
    # optionally ending in AM/PM letters; take the first match.
    time = re.search(fr'\d[\d/:,\- ]+[\d{"AMP" if is_24h_format else ""}]+', time_str)[0]
    # dateutil parses the extracted fragment into a datetime.
    time = parse(time)
    return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def read_jsonl_file(fn):
    """Read a JSON-Lines file and return its records as a list of objects."""
    with open(fn, mode='r', encoding='utf8') as handle:
        return [json.loads(line) for line in handle]
def read_json_file(fn):
    """Parse a single JSON document from *fn* and return it."""
    with open(fn, mode='r', encoding='utf8') as handle:
        return json.load(handle)
def dump_jsonl_file(fn, docs):
    """Write *docs* to *fn* in JSON-Lines format (one document per line).

    Bug fix: the original wrote the serialized documents back to back with
    no separator, producing a file that is not valid JSONL and cannot be
    read back by ``read_jsonl_file``.  A newline is now written after each
    record.
    """
    with open(fn, mode='w', encoding='utf8') as handle:
        for doc in docs:
            handle.write(json.dumps(doc, ensure_ascii=False))
            handle.write('\n')
if __name__ == '__main__':
    # Ad-hoc smoke test: load a scraped JSONL dump and show the first records.
    # docs = read_json_file('data/data_baomoi.json')
    docs = read_jsonl_file('data/24h.jsonl')
    print(docs[:2])
| [
"[email protected]"
] | |
3abcc4770b5d3213f9bbe698c4fd2bd2e30bc2df | 015ce35e6344d1726173594ae509dfc1ca6f856d | /3-OOP and DSA/4-Recursion/Study/5-fibonichi.py | cd8fcc970c153783d338b2223d11fd4aeb930ddb | [] | no_license | ayman-elkassas/Python-Notebooks | 4af80df75c15a6ac3049450b3920d500fef0e581 | 26a8265f458c40ac22965d55722f32a650851683 | refs/heads/master | 2023-04-03T19:12:17.707673 | 2021-04-10T21:32:37 | 2021-04-10T21:32:37 | 356,699,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | # Fn
# = F
# n−2 + Fn−1 for n > 1.
# import gzip
# gzip.GzipFile.readline(r"C:\Users\Ayman Elkassas\Desktop\dump.txt",)
def fib(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Rewritten iteratively: the original doubly-recursive version ran in
    exponential time; this runs in O(n) time and O(1) space.  For n <= 1
    the argument itself is returned, matching the original behaviour
    (including for negative inputs).
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr


print(fib(5))
| [
"[email protected]"
] | |
6fc250290cd0b7389544fbe3a86bdc07265dc7d7 | 8eccc4cab7ba7292c932468163c711d4058e3b90 | /app/inheritance/abstract/migrations/0003_auto_20191223_0612.py | 5f9ce7809d3b1fe08e15168d3691200f35a33369 | [] | no_license | zehye/django-document-wps12 | 97b1aa4be5a56b949ba59ac92e8d0c5cb3e22f73 | 086fdc581ba3f2db7bc39a6eb906fd97cc61c415 | refs/heads/master | 2022-09-08T12:46:19.110011 | 2019-12-26T09:07:15 | 2019-12-26T09:07:15 | 228,784,564 | 0 | 0 | null | 2022-08-23T17:59:03 | 2019-12-18T07:37:14 | Python | UTF-8 | Python | false | false | 737 | py | # Generated by Django 3.0 on 2019-12-23 06:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0 (makemigrations) on 2019-12-23.
    # Repoints ChildA.m2m / ChildB.m2m at Student through a ForeignKey,
    # giving each a distinct related_name so the reverse accessors on
    # Student do not clash between the two child models.

    dependencies = [
        ('abstract', '0002_auto_20191223_0539'),
    ]

    operations = [
        migrations.AlterField(
            model_name='childa',
            name='m2m',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abstract_childa', to='abstract.Student'),
        ),
        migrations.AlterField(
            model_name='childb',
            name='m2m',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abstract_childb', to='abstract.Student'),
        ),
    ]
| [
"[email protected]"
] | |
10a6013dcc36183777720bbc2952c93d81e122df | 0f60e5a4bffa7372f6461aba4f0e58de4e3508bb | /Pandas/panda21.py | 00ddfd6fe203e441b705dfd802516e4eaf340740 | [] | no_license | akshatrastogi25/Python | 519130d6671438d20b0e6928e597e2b9c5bf722f | a3e8a1cbc96d09e4f8a6674c23c74074bfb65a9a | refs/heads/master | 2023-03-26T02:14:14.092925 | 2021-03-25T12:10:31 | 2021-03-25T12:10:31 | 286,788,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import pandas as pd
one = pd.DataFrame({
'Name': ['Alex', 'Amy', 'Allen', 'Alice', 'Ayoung'],
'subject_id':['sub1','sub2','sub4','sub6','sub5'],
'Marks_scored':[98,90,87,69,78]},
index=[1,2,3,4,5])
two = pd.DataFrame({
'Name': ['Billy', 'Brian', 'Bran', 'Bryce', 'Betty'],
'subject_id':['sub2','sub4','sub3','sub6','sub5'],
'Marks_scored':[89,80,79,97,88]},
index=[1,2,3,4,5])
print pd.concat([one,two],axis=1) | [
"[email protected]"
] | |
b8c70af9726a94eba9ac6a43188c0994be97dfcb | cdc9a8bc051be72de5bace23fd0637701d699da3 | /preprocess/create_stanford_labels.py | 880bf6d76e11854488987df9b35ea38a1836deac | [
"Apache-2.0"
] | permissive | marshuang80/pe-slice-finder | 4a51a8f7ef90f836d3cd5935f89a3e7f13c1fd63 | 2426a55c404e8eb694110351d604d6bdd613e5ae | refs/heads/master | 2022-12-29T02:20:42.135931 | 2020-10-13T04:16:47 | 2020-10-13T04:16:47 | 296,091,898 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | import os
import sys
sys.path.append(os.getcwd())
import h5py
import pickle
import argparse
import pandas as pd
from constants import *
from tqdm import tqdm
from collections import defaultdict
def main(args):
    """Build a per-slice label CSV from the HDF5 volumes and the pickled
    slice-level annotations, writing the result to ``slice_labels.csv``.

    A slice is labeled 1 when its index appears in the pickle entry for
    its series, 0 otherwise; series without annotations are skipped.
    """
    # 'a' keeps the original behaviour of opening (or creating) the store.
    hdf5_fh = h5py.File(args.hdf5_file, 'a')
    slice_labels = pickle.load(open(args.pickle_file, 'rb'))

    rows = defaultdict(list)
    for series in hdf5_fh.keys():
        # skip series that carry no annotations
        if series not in slice_labels:
            continue
        positive = slice_labels[series]
        for slice_idx in range(hdf5_fh[series].shape[0]):
            rows['series'].append(series)
            rows['slice_idx'].append(slice_idx)
            rows['label'].append(1 if slice_idx in positive else 0)

    # save as csv
    pd.DataFrame.from_dict(rows).to_csv('slice_labels.csv')

    # clean up
    hdf5_fh.close()
if __name__ == "__main__":
    # CLI entry point; paths default to the Stanford PE dataset locations.
    parser = argparse.ArgumentParser()
    parser.add_argument('--hdf5_file', type=str, default='/data4/PE_stanford/Stanford_data/data.hdf5')
    parser.add_argument('--pickle_file', type=str, default='/data4/PE_stanford/Stanford_data/slice_labels.pkl')
    args = parser.parse_args()
    main(args)
| [
"[email protected]"
] | |
2dd331830c8da0eca6ca46d05d214d1443501f2f | 4ede275efc8bc9f9ef121dc37215d2f0d8453e36 | /primer1.py | 20a96af89513d28f097429ac8bc17040ee3ff8f6 | [] | no_license | shanthivimalanataraajan01/code | bfa8a441b0c360aebd02248ad4433cc21889c3d2 | ea467ae1eefd68a5dceaa53aab7149d31bd5faf6 | refs/heads/master | 2020-04-15T05:01:03.625422 | 2019-05-17T09:35:45 | 2019-05-17T09:35:45 | 164,405,963 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | #vimala
#hi
m,n=map(int,input().split())
x=' '
for n in range(m+1,n):
if n>0:
for i in range(2,n):
if n%i==0:
break
else:
x=x+str(n)+' '
print(x.strip())
| [
"[email protected]"
] | |
670a9c4656b1ed4889e4390c5fe424466c8af425 | e7d65f8773a8c736fc9e41e843d7da6da5cc2e0b | /py3plex/algorithms/network_classification/PPR.py | 0339b2da13f9375d038028962e9f8485a7392e37 | [
"BSD-3-Clause"
] | permissive | hanbei969/Py3plex | 768e86b16ca00044fcb4188e01edf32c332c8a2a | 1ef3e0e6d468d24bd6e6aec3bd68f20b9d9686bb | refs/heads/master | 2021-01-03T18:19:24.049457 | 2020-02-12T16:51:14 | 2020-02-12T16:51:14 | 240,188,307 | 1 | 0 | BSD-3-Clause | 2020-02-13T05:57:16 | 2020-02-13T05:57:16 | null | UTF-8 | Python | false | false | 4,584 | py | ## set of routines for validation of the PPR-based classification
from ..node_ranking import *
from ..general.benchmark_classification import *
import pandas as pd
from sklearn.svm import SVC
from sklearn.metrics import f1_score
import time
import numpy as np
import multiprocessing as mp
from sklearn.model_selection import StratifiedKFold,StratifiedShuffleSplit
from sklearn import preprocessing
def construct_PPR_matrix(graph_matrix,parallel=False):
    """
    Build the dense n x n personalized-PageRank feature matrix.

    Row i holds the PPR vector computed with node i as the restart node.
    This is the spatially intense version of the classifier: the whole
    matrix is materialized in memory.

    :param graph_matrix: adjacency matrix of the network (n columns)
    :param parallel: forwarded to run_PPR to enable parallel computation
    :return: numpy array of shape (n, n), one PPR vector per row
    """
    ## initialize the vectors
    n = graph_matrix.shape[1]
    vectors = np.zeros((n, n))
    results = run_PPR(graph_matrix,parallel=parallel)
    ## get the results in batches; a worker may yield either a single
    ## (node_index, ppr_vector) pair or a list of such pairs
    for result in results:
        if result != None:
            ## individual batches
            if isinstance(result, list):
                for ppr in result:
                    vectors[ppr[0],:] = ppr[1]
            else:
                ppr = result
                vectors[ppr[0],:] = ppr[1]
    return vectors
def construct_PPR_matrix_targets(graph_matrix,targets,parallel=False):
    """
    Build the PPR feature matrix restricted to the given target nodes.

    :param graph_matrix: adjacency matrix of the network (n columns)
    :param targets: iterable of node indices to rank from
    :param parallel: forwarded to run_PPR
    :return: numpy array of shape (len(targets), n); row k is the PPR
        vector computed for targets[k]
    """
    n = graph_matrix.shape[1]
    # np.zeros instead of np.empty so targets that yield no result leave a
    # well-defined all-zero row rather than uninitialized memory.
    vectors = np.zeros((len(targets), n))
    tar_map = dict(zip(targets, range(len(targets))))
    results = run_PPR(graph_matrix, targets=targets, parallel=parallel)
    for result in results:
        # Bug fix: the original assigned `vectors[1]` (the second row of the
        # output matrix) into every target row; the computed PPR vector is
        # result[1].
        vectors[tar_map[result[0]], :] = result[1]
    return vectors
## deal with that now..
def validate_ppr(core_network,labels,dataset_name="test",repetitions=5,random_seed=123,multiclass_classifier=None,target_nodes=None,parallel=False):
    """
    Run repeated stratified cross-validation of PPR-feature classification.

    :param core_network: adjacency matrix of the network
    :param labels: one-hot label matrix of shape (n_nodes, n_classes)
        (each row must contain exactly one 1 -- see the index(1) lookup)
    :param dataset_name: tag written into the result rows
    :param repetitions: number of times the whole experiment is repeated
    :param random_seed: seed for the shuffle splitter
    :param multiclass_classifier: sklearn-style estimator; defaults to a
        linear SVC with probability estimates
    :param target_nodes: optional node subset (supra-adjacency use case)
    :param parallel: forwarded to the PPR computation
    :return: pandas DataFrame with micro/macro F1 and timing per setting
    """
    if multiclass_classifier is None:
        multiclass_classifier = SVC(kernel = 'linear', C = 1,probability=True)
    df = pd.DataFrame()
    for k in range(repetitions):
        ## this is relevant for supra-adjacency-based tasks..
        if target_nodes is not None:
            print("Subnetwork ranking in progress..")
            vectors = construct_PPR_matrix_targets(core_network,target_nodes,parallel=parallel)
            labels = labels[target_nodes]
        else:
            vectors = construct_PPR_matrix(core_network,parallel=parallel)
        ## remove single instance-single target!
        ## keep only classes with at least 3 positive nodes so stratified
        ## splitting remains possible
        nz = np.count_nonzero(labels,axis=0)
        wnz = np.argwhere(nz>2).T[0]
        labels = labels[:,wnz]
        for j in np.arange(0.1,0.5,0.1):
            ## run the training..
            print("Train size:{}, method {}".format(j,"PPR"))
            print(vectors.shape,labels.shape)
            # NOTE(review): test_size is fixed at 0.5, so `j` only labels
            # the output rows -- it does not change the split. Confirm that
            # this is intended.
            rs = StratifiedShuffleSplit(n_splits=10, test_size=0.5, random_state=random_seed)
            micros = []
            macros = []
            times = []
            # collapse the one-hot rows into integer class ids
            new_train_y = []
            for y in labels:
                new_train_y.append(list(y).index(1))
            onedim_labels = np.array(new_train_y)
            for X_train, X_test in rs.split(vectors,new_train_y):
                start = time.time()
                train_x = vectors[X_train]
                test_x = vectors[X_test]
                train_labels = labels[X_train]
                test_labels = labels[X_test]
                train_labels_first = onedim_labels[X_train]
                test_labels_second = onedim_labels[X_test]
                clf = multiclass_classifier
                clf.fit(train_x, train_labels_first)
                preds = clf.predict(test_x)
                mi = f1_score(test_labels_second, preds, average='micro')
                ma = f1_score(test_labels_second, preds, average='macro')
                # being_predicted = np.unique(train_labels_first)
                # tmp_lab = test_labels[:,being_predicted]
                # mi,ma = evaluate_oracle_F1(probs,tmp_lab)
                ## train the model
                end = time.time()
                elapsed = end - start
                micros.append(mi)
                macros.append(ma)
                times.append(elapsed)
            outarray = {"percent_train": np.round(1-j,1), "micro_F":np.mean(micros),"macro_F":np.mean(macros) ,"setting": "PPR" ,"dataset": dataset_name,"time":np.mean(times)}
            df = df.append(outarray,ignore_index=True)
    df = df.reset_index()
    return df
| [
"[email protected]"
] | |
fbcf2f345a377b236e4d5dd331708ae9b0e6cc03 | 392a4f5c76414fcbed17dd5dccaf2f64096659a2 | /app_frame/page/market.py | 0630ce30172d3d8b20da2105324e02b39ca1bd86 | [] | no_license | Allison001/homework | 3bd5794c8bdd944f827f3e8008eea1831f90644b | 1ab910d21ad4348a212b226758cfa8244ea03bfc | refs/heads/master | 2023-04-08T22:48:56.667737 | 2021-04-15T03:38:56 | 2021-04-15T03:38:56 | 324,184,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | import yaml
from selenium.webdriver.common.by import By
from app_frame.basepage import BasePage
from app_frame.page.search import Search
class Market(BasePage):
    """Page object for the app's market screen."""

    def goto_search(self):
        # Drive the UI steps declared under the 'goto_search' key in
        # market.yaml, then hand back a Search page object bound to the
        # same driver.
        self.run_step('../page/market.yaml','goto_search')
        # self.find_and_click((By.XPATH,"//*[@resource-id='com.xueqiu.android:id/action_search']"))
        return Search(self.driver)
| [
"[email protected]"
] | |
c7d2e24957a8f5d7a7276553f6d133a9933b2d8a | 385e00e3d48446baf20cb3d0fbf9db0344cd95da | /test/visualization/test_utils.py | 9e8a593f52ffbe911da59c3806471afc61755eca | [
"Apache-2.0"
] | permissive | oliverdial/qiskit-experiments | d670f9151116e2e7d9a67f304a23313aa31fc30f | a387675a3fe817cef05b968bbf3e05799a09aaae | refs/heads/main | 2023-06-24T08:07:19.505243 | 2023-06-09T21:01:59 | 2023-06-09T21:01:59 | 362,153,676 | 0 | 0 | Apache-2.0 | 2021-04-27T15:03:52 | 2021-04-27T15:03:51 | null | UTF-8 | Python | false | false | 4,818 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test visualization utilities.
"""
import itertools as it
from test.base import QiskitExperimentsTestCase
from typing import List, Tuple
import numpy as np
from ddt import data, ddt
from qiskit.exceptions import QiskitError
from qiskit_experiments.visualization.utils import DataExtentCalculator
from qiskit_experiments.framework.package_deps import numpy_version
@ddt
class TestDataExtentCalculator(QiskitExperimentsTestCase):
    """Test DataExtentCalculator"""

    @classmethod
    def _dummy_data(
        cls,
        extent: Tuple[float, float, float, float] = (-1, 1, -5, 0),
        n_data: int = 5,
        n_points: int = 16,
    ) -> List[np.ndarray]:
        # Build n_data x n_data rectangular sub-regions of `extent`, each
        # represented as an (n_points, 2) array of x/y samples.
        # Create a list of bin edges by which to divide the target extent
        bin_edges = [
            np.histogram_bin_edges(extent[0:2], bins=n_data).tolist(),
            np.histogram_bin_edges(extent[2:], bins=n_data).tolist(),
        ]
        # Iterate over pairs of adjacent bin edges, which define the maximum
        # and minimum for the region. This is done by generating sliding
        # windows of bin_edges, i.e. for edges [a, b, c, d, e, f, g] the
        # pairs are (a, b), (b, c), ..., (f, g): a moving window of size 2.
        # TODO: remove the old code once numpy is above 1.20.
        dummy_data = []
        if numpy_version() >= (1, 20):
            for (x_min, x_max), (y_min, y_max) in it.product(
                *np.lib.stride_tricks.sliding_window_view(bin_edges, 2, 1)
            ):
                _dummy_data = np.asarray(
                    [
                        np.linspace(x_min, x_max, n_points),
                        np.linspace(y_min, y_max, n_points),
                    ]
                )
                dummy_data.append(_dummy_data.swapaxes(-1, -2))
        else:
            for (x_min, x_max), (y_min, y_max) in it.product(
                *tuple(list(zip(b[0:-1], b[1:])) for b in bin_edges)
            ):
                _dummy_data = np.asarray(
                    [
                        np.linspace(x_min, x_max, n_points),
                        np.linspace(y_min, y_max, n_points),
                    ]
                )
                dummy_data.append(_dummy_data.swapaxes(-1, -2))
        return dummy_data

    @data(*list(it.product([1.0, 1.1, 2.0], [None, 1.0, np.sqrt(2)])))
    def test_end_to_end(self, args):
        """Test end-to-end functionality.

        Results that are asserted include the range of the final extent
        tuple and its midpoint.
        """
        # Test args
        multiplier, aspect_ratio = args[0], args[1]

        # Problem inputs
        extent = (-1, 1, -5, 1)
        n_data = 6
        dummy_data = self._dummy_data(extent, n_data=n_data)
        ext_calc = DataExtentCalculator(multiplier=multiplier, aspect_ratio=aspect_ratio)

        # Add data as 2D and 1D arrays to test both methods
        for d in dummy_data[0 : int(n_data / 2)]:
            ext_calc.register_data(d)
        for d in dummy_data[int(n_data / 2) :]:
            for i_dim in range(2):
                ext_calc.register_data(d[:, i_dim], dim=i_dim)

        # Check extent
        actual_extent = ext_calc.extent()

        # Check that range was scaled. Given we also have an aspect ratio, we
        # may have a range that is larger than the original scaled by the
        # multiplier. At the minimum, the range should be exactly equal to
        # the original scaled by the multiplier
        expected_range = multiplier * np.diff(np.asarray(extent).reshape((2, 2)), axis=1).flatten()
        actual_range = np.diff(np.reshape(actual_extent, (2, 2)), axis=1).flatten()
        for act, exp in zip(actual_range, expected_range):
            self.assertTrue(act >= exp)

        # Check that the midpoints are the same.
        expected_midpoint = np.mean(np.reshape(extent, (2, 2)), axis=1).flatten()
        actual_midpoint = np.mean(np.reshape(actual_extent, (2, 2)), axis=1).flatten()
        np.testing.assert_almost_equal(
            actual_midpoint,
            expected_midpoint,
        )

    def test_no_data_error(self):
        """Test that a QiskitError is raised if no data was set."""
        ext_calc = DataExtentCalculator()
        with self.assertRaises(QiskitError):
            ext_calc.extent()
| [
"[email protected]"
] | |
4f0d3727a003f65b28d97e95316cdc9eefd284eb | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_196/ch80_2020_04_13_18_23_05_143280.py | f6edda895b2e0e2bcd29788dd3078b902f425c3f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | def interseccao_chaves(dic1,dic2):
lista = []
for a in dic1.keys() and in dic2.keys():
lista.append(a,b)
return lista
| [
"[email protected]"
] | |
b7935778e4af05b4794433f47991deced92fb943 | d9a469bc9cff39d89e7cb04e4fc537763aee9aca | /binance_chain/exceptions.py | 957d3ed87c3cd1eb28ab1f816979271c6ed5ca5f | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | sammchardy/python-binance-chain | d017c0f1e6bd84d28017f87e2d229b21a2ee8b8a | 19d7d639cc912a27ec86831338c2a2dc96289d50 | refs/heads/master | 2023-05-11T19:15:44.912507 | 2021-06-01T03:14:25 | 2021-06-01T03:14:25 | 172,324,144 | 310 | 111 | MIT | 2022-06-30T10:55:19 | 2019-02-24T10:29:29 | Python | UTF-8 | Python | false | false | 1,626 | py | import ujson as json
class BinanceChainAPIException(Exception):
    """Raised when the Binance Chain HTTP API returns an error response."""

    def __init__(self, response, status_code):
        code, message = 0, None
        try:
            payload = json.loads(response.content)
        except ValueError:
            # Non-JSON body: fall back to the bare status code when the body
            # is empty, otherwise echo the unparseable text.
            if response.content:
                message = 'Invalid JSON error message from Binance Chain: {}'.format(response.text)
            else:
                message = status_code
        else:
            code = payload.get('code', None)
            message = payload['message']

        self.code = code
        self.message = message
        self.status_code = status_code
        self.response = response
        self.request = getattr(response, 'request', None)

    def __str__(self):  # pragma: no cover
        return f'APIError(code={self.code}): {self.message}'
class BinanceChainRequestException(Exception):
    """Raised when a request to Binance Chain cannot be completed or parsed."""
    pass
class BinanceChainBroadcastException(Exception):
    """Raised when broadcasting a transaction to Binance Chain fails."""
    pass
class BinanceChainSigningAuthenticationException(Exception):
    """Raised when authentication with a signing service fails."""
    pass
class BinanceChainRPCException(Exception):
    """Raised when a Binance Chain node RPC call returns an error payload."""

    def __init__(self, response):
        self.code = 0
        try:
            payload = json.loads(response.content)
        except ValueError:
            self.message = 'Invalid JSON error message from Binance Chain: {}'.format(response.text)
        else:
            # RPC errors arrive nested under an "error" object.
            error = payload['error']
            self.code = error['code']
            self.message = error['message']

        self.status_code = response.status_code
        self.response = response
        self.request = getattr(response, 'request', None)

    def __str__(self):  # pragma: no cover
        return f'RPCError(code={self.code}): {self.message}'
| [
"[email protected]"
] | |
05cdd6e0b5aadfcd1453901287e445578f2b8e29 | 6ba38fe94e7ea5146c633f56f59c0c3278d695a7 | /build/build_for_ios | d6425614eecc82e40f167b7e162c91cecd846058 | [
"MIT"
] | permissive | mworks/mworks | b49b721c2c5c0471180516892649fe3bd753a326 | abf78fc91a44b99a97cf0eafb29e68ca3b7a08c7 | refs/heads/master | 2023-09-05T20:04:58.434227 | 2023-08-30T01:08:09 | 2023-08-30T01:08:09 | 2,356,013 | 14 | 11 | null | 2012-10-03T17:48:45 | 2011-09-09T14:55:57 | C++ | UTF-8 | Python | false | false | 941 | #!/usr/bin/env python3
import argparse
from subprocess import check_call
def main():
    """Parse command-line options and run xcodebuild for the iOS target.

    --archive produces a distribution archive; otherwise a clean build is
    performed.  An optional positional version number is passed through as
    MW_VERSION.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--archive', action='store_true',
                        help='create an archive for distribution')
    parser.add_argument('version', nargs='?', help='version number')
    options = parser.parse_args()

    action = ['archive'] if options.archive else ['clean', 'build']
    command = [
        '/usr/bin/xcrun',
        'xcodebuild',
        '-workspace', 'MWorks-iOS.xcworkspace',
        '-scheme', 'MWorks',
        '-destination', 'generic/platform=iOS',
        *action,
        'GCC_TREAT_WARNINGS_AS_ERRORS=YES',
        'MTL_TREAT_WARNINGS_AS_ERRORS=YES',
        'SWIFT_TREAT_WARNINGS_AS_ERRORS=YES',
    ]
    if options.version:
        command.append('MW_VERSION=' + options.version)

    check_call(command)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | ||
441e60c7846fde6cca41e6cbb3845b685e4f8672 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/cctbx/symmetry_search/boost_python/SConscript | be2824dfaa2fdc51694642b708bafd590f93bda6 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 216 | Import("env_cctbx_boost_python_ext")
env = env_cctbx_boost_python_ext.Clone()
env.Prepend(LIBS=["cctbx", "omptbx"])
env.SharedLibrary(target="#lib/cctbx_symmetry_search_ext", source=[
"symmetry_search_ext.cpp",
])
| [
"[email protected]"
] | ||
4f1ec457cdb2aff59d8558ed5d090e890e081fa7 | 80a689cecd96315e55e6452d201e6531868bdc99 | /management/commands/pdk_nudge_ios_devices_boto.py | c82c6760ca2673b2252cf9062343fe8914127764 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | kamau-edwin/PassiveDataKit-Django | d36fad6b366fef32c96941b10455b054abd44f7c | 95db5701f88c74328b0611124149fdffa079e7b8 | refs/heads/master | 2021-01-06T17:25:50.471370 | 2020-02-26T21:17:32 | 2020-02-26T21:17:32 | 241,416,694 | 0 | 0 | Apache-2.0 | 2020-02-18T16:56:09 | 2020-02-18T16:56:07 | null | UTF-8 | Python | false | false | 5,828 | py | # pylint: disable=no-member,line-too-long
import json
import re
import boto
import boto.exception
import boto.sns
from django.conf import settings
from django.core.management.base import BaseCommand
from ...decorators import handle_lock, log_scheduled_event
from ...models import DataPoint
class Command(BaseCommand):
    # NOTE: Python 2 code (print statements, dict.iteritems, py2 except
    # syntax) -- kept as-is.
    help = 'Send silent notifications to iOS devices to nudge power management systems for transmission using Boto and Amazon Simple Notification Service.'

    def add_arguments(self, parser):
        pass

    @handle_lock
    @log_scheduled_event
    def handle(self, *args, **options): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        # Collect the most recently reported APNs token per source device
        # (later points overwrite earlier ones in the dict).
        tokens = {}
        for point in DataPoint.objects.filter(generator_identifier='pdk-app-event', secondary_identifier='pdk-ios-device-token').order_by('created'):
            properties = point.fetch_properties()
            tokens[point.source] = properties['event_details']['token']
        region = [r for r in boto.sns.regions() if r.name == settings.PDK_BOTO_REGION][0]
        # Silent push: content-available with no alert wakes the app briefly.
        notification = {'aps': {'content-available' : 1}}
        message = {'APNS': json.dumps(notification), 'default': 'nil'}
        sns = boto.sns.SNSConnection(
            aws_access_key_id=settings.PDK_BOTO_ACCESS_KEY,
            aws_secret_access_key=settings.PDK_BOTO_ACCESS_SECRET,
            region=region,
        )
        for source, token in tokens.iteritems(): # pylint: disable=unused-variable
            try:
                endpoint_response = sns.create_platform_endpoint(
                    platform_application_arn=settings.PDK_BOTO_SNS_ARN,
                    token=token,
                )
                endpoint_arn = endpoint_response['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
            except boto.exception.BotoServerError, err:
                print 'ERR 1: ' + err.message
                # Yes, this is actually the official way:
                # http://stackoverflow.com/questions/22227262/aws-boto-sns-get-endpoint-arn-by-device-token
                result_re = re.compile(r'Endpoint(.*)already', re.IGNORECASE)
                result = result_re.search(err.message)
                if result:
                    endpoint_arn = result.group(0).replace('Endpoint ', '').replace(' already', '')
                else:
                    raise
            try:
                sns.publish(target_arn=endpoint_arn, message_structure='json', message=json.dumps(message))
            except boto.exception.BotoServerError, err:
                print 'FAILED SENDING TO ' + token
                print 'ERR: ' + err.message
                result_re = re.compile(r'Endpoint(.*)disabled', re.IGNORECASE)
                result = result_re.search(err.message)
                if result:
                    # A disabled production endpoint: rename the data point
                    # so the sandbox pass below retries this token.
                    for point in DataPoint.objects.filter(source=source, generator_identifier='pdk-app-event', secondary_identifier='pdk-ios-device-token').order_by('created'):
                        properties = point.fetch_properties()
                        if token == properties['event_details']['token']:
                            print 'RENAMING: ' + token
                            point.secondary_identifier = 'pdk-ios-device-token-sandbox'
                            point.save()
                else:
                    raise
        # Second pass: same flow for sandbox (development) tokens.
        tokens = {}
        for point in DataPoint.objects.filter(generator_identifier='pdk-app-event', secondary_identifier='pdk-ios-device-token-sandbox').order_by('created'):
            properties = point.fetch_properties()
            tokens[point.source] = properties['event_details']['token']
        message = {'APNS_SANDBOX': json.dumps(notification), 'default': 'nil'}
        for source, token in tokens.iteritems(): # pylint: disable=unused-variable
            try:
                endpoint_response = sns.create_platform_endpoint(
                    platform_application_arn=settings.PDK_BOTO_SNS_ARN_SANDBOX,
                    token=token,
                )
                endpoint_arn = endpoint_response['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
            except boto.exception.BotoServerError, err:
                print 'ERR 2: ' + err.message
                # Yes, this is actually the official way:
                # http://stackoverflow.com/questions/22227262/aws-boto-sns-get-endpoint-arn-by-device-token
                result_re = re.compile(r'Endpoint(.*)already', re.IGNORECASE)
                result = result_re.search(err.message)
                if result:
                    endpoint_arn = result.group(0).replace('Endpoint ', '').replace(' already', '')
                else:
                    raise
            try:
                sns.publish(target_arn=endpoint_arn, message_structure='json', message=json.dumps(message))
                # print('PUBLISHED DEV: ' + token)
            except boto.exception.BotoServerError, err:
                print 'FAILED SENDING 2 TO ' + token
                print 'ERR: ' + err.message
                result_re = re.compile(r'Endpoint(.*)disabled', re.IGNORECASE)
                result = result_re.search(err.message)
                if result:
                    # Disabled in the sandbox too: mark the token dead so it
                    # is skipped on future runs.
                    for point in DataPoint.objects.filter(source=source, generator_identifier='pdk-app-event', secondary_identifier='pdk-ios-device-token-sandbox').order_by('created'):
                        properties = point.fetch_properties()
                        if token == properties['event_details']['token']:
                            print 'RENAMING 2: ' + token
                            point.secondary_identifier = 'pdk-ios-device-token-disabled'
                            point.save()
                else:
                    raise
| [
"[email protected]"
] | |
cebcfbab3351bb97acf855a4e8a6a0e12ecff3e0 | d88f9acfe09d79f06cf251b8cbbb012b55d99f39 | /Scraping/test_scraping/create_sqlite_db.py | 95e1b304f3fc724c65ddf601e4224bbe7e44b3ed | [] | no_license | Twishar/DataAnalysis | 535beb795e30b8ac07767a61f1ebfbc60546271f | e5d5ba9ba0b9a51031e8f1f4225bc35d848159dd | refs/heads/master | 2022-03-04T19:02:30.917729 | 2019-11-15T14:18:53 | 2019-11-15T14:18:53 | 98,515,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
import sqlite3

# Create the scraping-results database with a single two-column table.
# NOTE: CREATE TABLE (without IF NOT EXISTS) fails if the table already
# exists -- same behaviour as the original script.
connection = sqlite3.connect('allo_db.sqlite')
cursor = connection.cursor()
cursor.execute('''CREATE TABLE allo_parse
             (search_param text, results text);''')
connection.commit()
connection.close()
| [
"[email protected]"
] | |
3f39b4c11c3aa082d210897c4b788bb31b2e0551 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/carbonui/control/windowDropDownMenu.py | 6c26d7806b20cec4ebb3158345c97b472461b7f6 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 1,453 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\carbonui\control\windowDropDownMenu.py
import carbonui.const as uiconst
from carbonui.primitives.container import Container
from carbonui.primitives.fill import Fill
from carbonui.primitives.line import Line
from carbonui.control.label import LabelOverride as Label
class WindowDropDownMenuCore(Container):
    """Clickable drop-down menu caption placed in a window's header area."""
    __guid__ = 'uicls.WindowDropDownMenuCore'
    default_height = 10
    default_align = uiconst.TOLEFT
    default_state = uiconst.UI_NORMAL

    def Setup(self, name, GetMenu):
        # `name` is the caption text; `GetMenu` is the callback that builds
        # the menu contents when the widget is activated.
        self.name = name
        self.expandOnLeft = 1
        self.PrepareLayout()
        self.GetMenu = GetMenu

    def PrepareLayout(self):
        # Right-hand separator line, centred uppercase caption, and a hidden
        # highlight fill toggled on mouse-over; width tracks the label.
        Line(parent=self, align=uiconst.TORIGHT)
        self.label = Label(text=self.name, parent=self, align=uiconst.CENTER, fontsize=9, letterspace=1, top=1, state=uiconst.UI_DISABLED, uppercase=1)
        self.hilite = Fill(parent=self, state=uiconst.UI_HIDDEN, padding=1)
        self.width = self.label.width + 10
        self.cursor = uiconst.UICURSOR_SELECT

    def OnMouseEnter(self):
        self.hilite.state = uiconst.UI_DISABLED

    def OnMouseExit(self):
        self.hilite.state = uiconst.UI_HIDDEN

    def GetMenuPosition(self, *args):
        # The menu opens just below the widget's bottom-left corner.
        return (self.absoluteLeft, self.absoluteBottom + 2)
class WindowDropDownMenuCoreOverride(WindowDropDownMenuCore):
pass
| [
"[email protected]"
] | |
a317a9e4f4f5d6e738556b77ccdf5ca54c22337f | d8ef155d2b931642e448263d43fbf856b3a466c0 | /certificates/__main__.py | ac85092b9df679740502289f380cc93e8e0a251c | [
"Apache-2.0"
] | permissive | diemesleno/certificates | a34632bc97a175fd739cdaa6d78f880316176a3c | 7aedf80903304216c6d9a8c99efd4df5aa7f8049 | refs/heads/master | 2022-02-15T17:44:43.132433 | 2019-08-16T05:44:26 | 2019-08-16T05:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | import argparse
from .certificates import make_certificates
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"participants", help="csv filaname containing participants"
)
parser.add_argument(
"template", help="certificate template in svg format used to build"
)
parser.add_argument(
"--output",
"-o",
default="./output",
help="destination of the generated certificates",
)
args = parser.parse_args()
make_certificates(args.participants, args.template, args.output)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
eee490dcf526ffb10b67a1324f01736b974f8ce9 | 89f8a2e609c2b2a7e4ca10be3830200c7e8e438e | /ftp_homework/ftp_1/bin/start_server.py | e0741d5f538a88369aa9ea5194dab97ea4334bde | [] | no_license | boundshunter/s5-study | b8265ccc0d09f19624002b5919c5fb6104bf65d3 | 528eda7435a14a2a79c88af02695efec13972f25 | refs/heads/master | 2018-09-27T17:40:28.352951 | 2018-06-11T15:38:13 | 2018-06-11T15:38:13 | 111,669,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'jfsu'
import sys
import os
BaseDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BaseDir)
from core import ftpserver
if __name__ == '__main__':
sv = ftpserver.FtpServer()
| [
"[email protected]"
] | |
27a9e101cd4a7f253db5f5c89fb3068918340ead | 34745a8d54fa7e3d9e4237415eb52e507508ad79 | /Python Fundamentals/03 Lists Basics/Exercises/07_Easter_Gifts.py | 172ea7853a18b1443adb30323f730642b61c1f6b | [] | no_license | DilyanTsenkov/SoftUni-Software-Engineering | 50476af0dc88b267d72c56fa87eeb88d841164b2 | fe446e3a50a00bb2e48d71ab8f783e0a4a406094 | refs/heads/main | 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | gifts_names = input().split(" ")
command = input()
while command != "No Money":
command_list = command.split(" ")
if command_list[0] == "OutOfStock":
if command_list[1] in gifts_names:
for i in range(len(gifts_names)):
if gifts_names[i] == command_list[1]:
gifts_names[i] = "None"
elif command_list[0] == "Required" and int(command_list[2]) > 0 and int(command_list[2]) <= int(
len(gifts_names)) - 1:
gifts_names[int(command_list[2])] = command_list[1]
elif command_list[0] == "JustInCase":
gifts_names[int(len(gifts_names)) - 1] = command_list[1]
command = input()
for n in range(len(gifts_names)):
if "None" in gifts_names:
gifts_names.remove("None")
gifts_names_print = " ".join(gifts_names)
print(gifts_names_print)
| [
"[email protected]"
] | |
92f0088358bab1fa58c2c52e016d253b12bfc28d | 7246faf9a222269ce2612613f58dc5ff19091f10 | /baekjoon/3000~5999/4948_베르트랑공준.py | f2adb647d3f69804cccea3dfb61db9c7a6ded31a | [] | no_license | gusdn3477/Algorithm_Study | 87a2eb72a8488d9263a86db70dadc7944434d41d | 3fefe1dcb40122157845ffc542f41cb097711cc8 | refs/heads/main | 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from math import sqrt
arr = [i for i in range(250000)]
arr[0] = 0
arr[1] = 0
for i in range(2, int(sqrt(250000)) + 1):
for j in range(i + i, 250000, i):
if arr[j] != 0:
arr[j] = 0
while True:
N = int(input())
ct = 0
if N == 0:
break
for i in range(N + 1, N * 2 + 1):
if arr[i] != 0:
ct += 1
print(ct)
| [
"[email protected]"
] | |
fd6c788ba6b8318466159be137309f8ff4ea1a29 | 9f109d4d4fa2eb4ecec2415a21e45945a35cd58a | /xshop/users/tests/test_models.py | 81150f9ff1be611e68b2606f5f69d464e95e5b0d | [] | no_license | denokenya/xshop-web | 4be66a39272075b778ed7dd8de996fec90b5fab8 | 262665ec4c2cb91490b219a086b8994d6eceb805 | refs/heads/master | 2023-06-07T02:54:57.068430 | 2020-09-13T11:24:32 | 2020-09-13T11:24:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from django.test import TestCase
from model_bakery import baker
from ..models import User
class UserTests(TestCase):
def setUp(self) -> None:
self.user = baker.make(
User,
mobile="01010092181",
name="Ahmed Loay Shahwan",
email="[email protected]",
)
self.user1 = baker.make(User, mobile="01010092182")
def test_get_short_name(self):
self.assertEqual(self.user.get_short_name(), "Ahmed")
def test_get_full_name(self):
self.assertEqual(self.user.get_full_name(), "Ahmed Loay Shahwan")
def test_str(self):
self.assertEqual(str(self.user), "01010092181")
def test_repr(self):
# user with name
self.assertEqual(
self.user.__repr__(),
f"<User {self.user.id}: {str(self.user)} - {self.user.name}>",
)
# user without name
self.assertEqual(
self.user1.__repr__(), f"<User {self.user1.id}: {str(self.user1)}>",
)
| [
"[email protected]"
] | |
4558b73f4309e412016f5c1d22d3652908e71d01 | c2c84c98f2247f2a9fe280e41f3a4dc74fd4de1a | /online/analyses.py | 73a0d03dbb5a0da0b17ff4129ab1c019baf63cab | [
"MIT"
] | permissive | mrware91/tmolv29 | 153ded42ee190287442330943a2a9c51d8e55243 | 823321f2505b684e9fd1de1c01f4e46997f1e307 | refs/heads/main | 2023-04-06T13:55:09.926010 | 2021-04-14T14:26:05 | 2021-04-14T14:26:05 | 347,172,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,299 | py | # Contributors: Matt Ware
import numpy as np
class analyses:
def __init__(self, analysis, totalEvents,printMode='verbose'):
self.analysis = analysis
self.totalEvents = totalEvents
self.events = 0
self.printMode = printMode
self.data = {}
self.dataTypesFound = False
self.outTypes = {}
self.initialize()
def initialize(self):
self.events = 0
self.data = {}
for key in self.analysis:
self.outTypes[key] = None
self.analysis[key]['type'] = None
self.analysis[key]['size'] = None
self.data[key] = np.zeros(self.totalEvents)*np.nan
self.setdefault(self.analysis[key],
'function',
'%s: No analysis function provided. Defaulting to return raw data.'%key,
lambda x: x)
self.setdefault(self.analysis[key],
'analyzeEvery',
'%s: No modulo provided. Will analyze every shot.'%key,
1)
def update(self, detectors):
self.dataTypesFound = True
for key in self.analysis:
analyzeEvery = self.analysis[key]['analyzeEvery']
if not ( self.events%analyzeEvery == 0):
continue
function = self.analysis[key]['function']
detectorKey = self.analysis[key]['detectorKey']
shotData = detectors[detectorKey]['shotData']
if (shotData is None) & (self.analysis[key]['type'] is None):
self.dataTypesFound = False
continue
elif (shotData is None) & (self.analysis[key]['type'] is not None):
self.data[key][self.events,] = self.data[key][self.events,]*np.nan
continue
result = function(shotData)
if result is not None:
if self.analysis[key]['type'] is None:
self.analysis[key]['type'] = type(result)
self.analysis[key]['size'] = np.size(result)
dims = np.shape(result)
self.data[key] = np.zeros((self.totalEvents,*dims))*np.nan
self.data[key][self.events,] = result
if self.outTypes[key] is None:
self.outTypes[key] = {}
self.outTypes[key]['type'] = type(self.data[key][self.events,])
self.outTypes[key]['size'] = np.size( self.data[key][self.events,] )
elif (result is None) & (self.analysis[key]['type'] is None):
self.dataTypesFound = False
self.events += 1
if self.events >= self.totalEvents:
self.cprint('Read events exceeds total expected. Resetting event count.')
self.events = 0
def setdefault(self, adict, key, response, default):
try:
adict[key]
except KeyError as ke:
allowedErrorStr = '\'%s\'' % key
if allowedErrorStr == str(ke):
self.cprint(response)
adict[key] = default
else:
raise ke
# def cprint(self,aString):
# print(aString)
def cprint(self, aString):
if self.printMode in 'verbose':
print(aString)
elif self.printMode in 'quiet':
pass
else:
print('printMode is %s. Should be verbose or quiet. Defaulting to verbose.'%self.printMode)
self.printMode = 'verbose'
self.cprint(aString)
def H5out(self):
if self.dataTypesFound:
outDict = {}
for key in self.data:
try:
outDict[key] = np.copy(self.data[key][0,:])
except IndexError as ie:
if ('1-dimensional' in str(ie)):
# print(f'dimension of {key} is {self.data[key].shape}')
outDict[key] = np.copy(self.data[key][:])
else:
raise ie
return outDict
else:
return None | [
"[email protected]"
] | |
2a5762a03705f381381e6c124790e7ce1ab5d662 | 93a7db386dfa0ac0dc369cc7f4b974224c801d8d | /scripts/ngram_io.py | 33d3856f68312a40f09259482de1803a86d567b5 | [] | no_license | lingxiao/good-great-combo | e051f20c89b7317a14ca5cee357bda7b095ce174 | 4d2691866bc21e2c542354ad3aae6f369eb86c87 | refs/heads/master | 2021-01-19T19:30:43.391759 | 2017-04-09T12:35:15 | 2017-04-09T12:35:15 | 83,699,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | ############################################################
# Module : Open Ngram and read linguistic pattern
# Date : April 3rd, 2017
# Author : Xiao Ling, merle
############################################################
import os
############################################################
'''
@Use : Open all ngrams in ngram_dir and stream output as tuple of (ngram, count)
@Input : - ngram_dir :: String
- debug :: Bool, if true then only output parts of stream
@Output: Iterator output ngrams of form:
(ngram, count) :: Iterator (String, String)
Throw: NameError if path does not exists
'''
def with_ngram(ngram_dir, debug = False):
if not os.path.exists(ngram_dir):
raise NameError('Path not found at ' + ngram_dir)
else:
ngram_paths = [os.path.join(ngram_dir, p) for \
p in os.listdir(ngram_dir) if '.txt' in p]
if not ngram_paths:
raise NameError('Directory Empty at ' + ngram_dir)
if debug:
ngram_paths = [ngram_paths[0]]
for path in ngram_paths:
with open(path, 'rb') as h:
for line in h:
xsn = line.split('\t')
if len(xsn) == 2:
xs,n = xsn
n,_ = n.split('\n')
yield (xs,n)
############################################################
'''
@Use: Given path to linguistic pattern, output pattern
'''
def read_pattern(pattern_path):
if os.path.exists(pattern_path):
strong_weak, weak_strong = open(pattern_path,'rb').read().split('=== weak-strong')
strong_weak = [p for p in strong_weak.split('\n') if p][1:]
weak_strong = [p for p in weak_strong.split('\n') if p][:-1]
return {'strong-weak': strong_weak, 'weak-strong': weak_strong}
else:
raise NameError('Cannot find pattern at path ' + pattern_path)
| [
"[email protected]"
] | |
c4d693a018899753b9d47f6da7643ece8efb4bfe | 10fbe5526e5f0b8588b65f70f088cd86b6e9afbe | /irmtbds/migrations/0002_auto_20150218_1621.py | 3c05b27f5b6c037590a673b577c9744a196e934f | [] | no_license | MarkusH/django-migrations-benchmark | eb4b2312bb30a5a5d2abf25e95eca8f714162056 | e2bd24755389668b34b87d254ec8ac63725dc56e | refs/heads/master | 2016-09-05T15:36:45.250134 | 2015-03-31T23:44:28 | 2015-03-31T23:44:28 | 31,168,231 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('irmtbds', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='rqzheruyb',
name='xknvpfy',
),
migrations.AddField(
model_name='rqzheruyb',
name='kplrvqptcm',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
d605544bb5bd4b5f2f891b75f75930b2d21e7fe4 | 048df2b4dc5ad153a36afad33831017800b9b9c7 | /atcoder/agc008/agc008_c.py | 01428e6976f334cebf389e5e84a0a5f947a48943 | [] | no_license | fluffyowl/past-submissions | a73e8f5157c647634668c200cd977f4428c6ac7d | 24706da1f79e5595b2f9f2583c736135ea055eb7 | refs/heads/master | 2022-02-21T06:32:43.156817 | 2019-09-16T00:17:50 | 2019-09-16T00:17:50 | 71,639,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | a, b, c, d, e, f, g = map(int, raw_input().split())
if a > 0 and d > 0 and e > 0:
ans1 = b + a / 2 * 2 + d / 2 * 2 + e / 2 * 2
ans2 = b + 3 + (a-1) / 2 * 2 + (d-1) / 2 * 2 + (e-1) / 2 * 2
print max(ans1, ans2)
else:
print b + a / 2 * 2 + d / 2 * 2 + e / 2 * 2
| [
"[email protected]"
] | |
b4d01dd3705d74d25a15957865fcbc913580986c | 36afa271f080459adf1014cd23f4be9f954dfee6 | /Crawler/Course/第八章:scrapy框架/sunPro/sunPro/spiders/sun.py | 35ab678e80afc0bf5d06d12f11a75a5455738471 | [] | no_license | King-Of-Game/Python | b69186a7574ce1c0b7097207cfe9a2eb38a90bc0 | 643b9fd22efd78f6679735f23432943a57b5f5bb | refs/heads/master | 2023-05-25T05:35:14.473114 | 2021-10-24T12:52:21 | 2021-10-24T12:52:21 | 151,251,434 | 3 | 0 | null | 2023-05-01T20:51:50 | 2018-10-02T12:34:04 | HTML | UTF-8 | Python | false | false | 2,909 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from sunPro.items import SunproItem
from sunPro.items import DetailItem
# 需求:爬取小说分类、名称、人气、简介
class SunSpider(CrawlSpider):
name = 'sun'
# allowed_domains = ['www.xxx.com']
start_urls = ['https://www.69shu.org/fenlei/1_1/']
# 链接提取器:根据指定规则(allow="正则")进行链接的提取
link_extractor = LinkExtractor(allow=r'fenlei/1_(?!16|\d{3,})')
link_detail_extractor = LinkExtractor(allow=r'/book/\d+/(?!\d+\.html)') # /book/\d+/(?!\d+\.html)
rules = (
# 规则解析器:将链接提取器提取到的链接进行指定规则(callback)的解析操作
# follow=True:可以将链接提取器继续作用到,链接提取器提取的链接,对应的页面中
Rule(link_extractor, callback='parse_novel_name', follow=False),
Rule(link_detail_extractor, callback='parse_novel_detail', follow=False),
)
'''
以下两个解析方法没有手动发起请求,是不可以实现请求传参的: 也就是说不能通过yield scrapy.Request() 回调其它函数
无法将两个解析方法解析的数据存储到同一个item中,可以依次存储到两个item中
'''
# 解析小说类别、名称、作者
def parse_novel_name(self, response):
# item = {}
# #item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
# #item['name'] = response.xpath('//div[su@id="name"]').get()
# #item['description'] = response.xpath('//div[@id="description"]').get()
# return item
print('\n', response)
# 注意:xpath表达式中不可以出现tbody标签
li_list = response.xpath('/html/body/div[3]/div/div/div[2]/div[1]/div[2]/ul/li')
for li in li_list:
novel_category = li.xpath('./span[1]/text()').extract_first()
novel_name = li.xpath('./span[2]/a/text()').extract_first()
novel_author = li.xpath('./span[4]/text()').extract_first()
# print(novel_category, novel_name, novel_author)
item = SunproItem()
item['novel_category'] = novel_category
item['novel_name'] = novel_name
item['novel_author'] = novel_author
yield item
# 解析小说人气和简介
def parse_novel_detail(self, response):
# print(response)
novel_popularity = response.xpath('//*[@id="info"]/p/span/text()').extract_first()
novel_synopsis = response.xpath('//*[@id="info"]/div[3]//text()').extract()
novel_synopsis = ''.join(novel_synopsis)
# print(novel_popularity)
item = DetailItem()
item['novel_popularity'] = novel_popularity
item['novel_synopsis'] = novel_synopsis
yield item
| [
"[email protected]"
] | |
15255dffd47f10b3f99409f7b5dea95315005ab9 | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/users/models.py | a3a6f2b88a946f2a8ca0ab80decd3e78a3924509 | [] | no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
picture = models.ImageField('Fotode perfil', default='/img/blank-pic.png')
following = models.ManyToManyField('self', blank=True) | [
"[email protected]"
] | |
ea9e7a8b99cd02b1f71e0f5c2c419a055b084728 | fe0bca3fcf363ebc465fcc370e77b55df1cfaaa7 | /src/route/work_viewer.py | f79466d814c37cc4151ac1ca0217dbe9d45950dc | [] | no_license | sihcpro/todo-list | 66847aece556fe45223b98ecc44f04bbaaf17b55 | 1db48a63e9f4d309d57baeca691f6e85c36866a6 | refs/heads/master | 2022-11-17T14:34:20.316901 | 2020-07-14T10:16:18 | 2020-07-14T10:16:18 | 279,233,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,661 | py | import calendar
from datetime import timedelta
from sqlalchemy import Date, and_, cast, or_
from .data_define import ShowWorkData
from .resource import WorkResource
def configWorkViewer(Domain):
session = Domain.session
def getValidatedDate(param):
date_data = ShowWorkData(
from_date=param["from_date"][0], to_date=param["to_date"][0],
)
if date_data.from_date > date_data.to_date:
raise ValueError("from_date must smaller than to_date")
return date_data
def getWorkInAPerius(from_date, to_date):
record = {"from_date": str(from_date), "to_date": str(to_date)}
if from_date == to_date:
works = (
session.query(WorkResource)
.filter(
or_(
cast(WorkResource.starting_date, Date) == to_date,
cast(WorkResource.ending_date, Date) == to_date,
and_(
cast(WorkResource.starting_date, Date) < to_date,
cast(WorkResource.ending_date, Date) > to_date,
),
)
)
.all()
)
else:
works = (
session.query(WorkResource)
.filter(
or_(
and_(
WorkResource.starting_date >= from_date,
WorkResource.starting_date < to_date,
),
and_(
WorkResource.ending_date >= from_date,
WorkResource.ending_date < to_date,
),
and_(
WorkResource.starting_date <= from_date,
WorkResource.ending_date >= to_date,
),
)
)
.all()
)
record["works"] = [work.asDict() for work in works]
return record
@Domain.registerQuery("show-work-by-date")
def showWorkByDate(data, identifier, param):
date_data = getValidatedDate(param)
date = date_data.from_date
results = []
while date <= date_data.to_date:
results.append(getWorkInAPerius(date, date))
date += timedelta(days=1)
return results
@Domain.registerQuery("show-work-by-week")
def showWorkByWeek(data, identifier, param):
date_data = getValidatedDate(param)
date = date_data.from_date
date = date - timedelta(days=date.weekday())
results = []
while date <= date_data.to_date:
start_date = date
end_date = date + timedelta(weeks=1) - timedelta(microseconds=1)
results.append(getWorkInAPerius(start_date, end_date))
date += timedelta(weeks=1)
return results
@Domain.registerQuery("show-work-by-month")
def showWorkByMonth(data, identifier, param):
date_data = getValidatedDate(param)
date = date_data.from_date
date = date - timedelta(days=date.day - 1)
results = []
while date <= date_data.to_date:
days_in_month = calendar.monthrange(date.year, date.month)[1]
start_date = date
end_date = (
date
+ timedelta(days=days_in_month)
- timedelta(microseconds=1)
)
results.append(getWorkInAPerius(start_date, end_date))
date += timedelta(days=days_in_month)
return results
| [
"[email protected]"
] | |
77d3ccb4fbb606e29dc100993d9286af9143d806 | f00767fdeed6bfa8b12f6900b9f9bd5c70786895 | /models/base_db.py | b9ec16abf725b932e97446cf9463b303db180b0b | [] | no_license | guoyu07/domain_whois_query | de22cb5d83db2441ba512935fd7f3ed5c158997a | c70b52f2b9306e4b9ead273de279cd149052623f | refs/heads/master | 2020-12-07T06:24:57.907042 | 2015-11-29T00:53:31 | 2015-11-29T00:53:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | # encoding:utf-8
"""
操作数据库基础类
"""
import torndb
class BaseDb(object):
def __init__(self):
self.db = torndb.Connection(
host="172.26.253.3",
database="DomainWhois",
user="root",
password="platform",
charset="utf8"
)
| [
"[email protected]"
] | |
75750e2d778d9088cc0aa9d4e0a9b23d245d0029 | 7041c85dffb757c3e7063118730363f32ebb9b8a | /프로젝트/20190111/open_api.py | af937d2499eb4c1f56272d6930b3d2c64641b4f6 | [] | no_license | woonji913/til | efae551baff56f3ca16169b93185a65f4d81cd7a | a05efc68f88f535c26cb4d4a396a1e9cd6bf0248 | refs/heads/master | 2021-06-06T23:17:54.504620 | 2019-06-19T04:29:18 | 2019-06-19T04:29:18 | 163,778,844 | 1 | 0 | null | 2021-05-08T16:27:17 | 2019-01-02T01:08:19 | HTML | UTF-8 | Python | false | false | 1,240 | py | import requests
from bs4 import BeautifulSoup
import csv, datetime, os
date = datetime.date(2019, 1, 13)
weeks = datetime.timedelta(7)
movies = []
check = set()
key = os.environ['KEY']
for i in range(10):
current = date - weeks * i
url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchWeeklyBoxOfficeList.json?key={key}&weekGb=0&targetDt='
url += str(current.strftime('%Y%m%d'))
res_json = requests.get(url).json()
for j in res_json['boxOfficeResult']['weeklyBoxOfficeList']:
code = j['movieCd']
name = j['movieNm']
total_aud = j['audiAcc']
if code not in check:
print(name)
movies.append({'movie_code': code, 'title': name, 'audience': total_aud, 'recorded_at': current})
check.add(code)
# movieIDDF = pd.DataFrame()
# movieIDDF = movieIDDF.append({"movieCd":" ", "movieNM": " ", "audiCnt": " ", "openDt": " "}, ignore_index = True)
# # pprint(movieIDDF)
with open('boxoffice.csv', 'w', encoding='utf-8', newline='') as f:
fieldnames = ('movie_code', 'title', 'audience', 'recorded_at')
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for movie in movies:
writer.writerow(movie) | [
"[email protected]"
] | |
694553df0c0aa0de72c6cd3372d907b36a37b9fa | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_RTN8_solve.py | 7578551770778fbca70157c20919e407da47b880 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 2,357 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import math
def optimal(from_, to_):
if from_ % 2 == 0:
yield from_
from_ += 1
for divider_candidate in range(from_, to_, 2):
yield divider_candidate
def get_divider(x, from_, to_):
for divider_candidate in optimal(from_, min(to_, int(math.sqrt(x)) + 1)):
if x % divider_candidate == 0:
return divider_candidate
def solve(n_and_j):
n, j = n_and_j.split(' ')
n, j = int(n), int(j)
results_candidates = []
results = []
def generate_jamcoin_candidate():
for bin_number in range(0, 2 ** (n - 1)):
yield ('1{:0%sb}1' % (n - 2)).format(bin_number)
jamcoin_candidate_generator = generate_jamcoin_candidate()
def get_jamcoin_candidate(i):
if i >= len(results_candidates):
jamcoin_candidate = next(jamcoin_candidate_generator)
results_candidates.append((
jamcoin_candidate,
{'nums': [int(jamcoin_candidate, b) for b in range(2, 11)],
'step': 2,
'results': [None] * 9}))
return results_candidates[i]
jamcoin_candidate_i = 0
max_divider = 4
max_jamcoin_i = 2
max_bin_number = 2 ** (n - 1)
while True:
jamcoin_candidate, stats = get_jamcoin_candidate(jamcoin_candidate_i)
all_done = True
for i, num in enumerate(stats['nums']):
if stats['results'][i]:
continue
divider = get_divider(num, stats['step'], max_divider)
if divider:
stats['results'][i] = divider
else:
all_done = False
if all_done:
results.append(jamcoin_candidate + ' ' + ' '.join(map(str, stats['results'])))
results_candidates.pop(jamcoin_candidate_i)
if len(results) == j:
return '\n'.join(results)
else:
jamcoin_candidate_i += 1
if jamcoin_candidate_i >= max_jamcoin_i:
max_divider += 2
jamcoin_candidate_i = 0
max_jamcoin_i = min(max_bin_number, max_jamcoin_i * 2)
if __name__ == '__main__':
cases_number = int(input())
for case_number in range(1, cases_number + 1):
input_args = input()
print('Case #%s:\n%s' % (case_number, solve(input_args)))
| [
"[[email protected]]"
] | |
3c851c00f3168cf06f90684e89022ab2bc3965e0 | c9697437c292df7fefd68559fdd9636066bdb2f1 | /dev/animations/quick_sph_harm_anim.py | 70d6bba7b23d2c08505d1efe4f8e75ea2ef961bf | [] | no_license | JoshKarpel/ionization | ebdb387483a9bc3fdb52818ab8e897e562ffcc67 | 3056df523ee90147d262b0e8bfaaef6f2678ea11 | refs/heads/master | 2021-03-24T13:03:57.469388 | 2020-04-06T03:37:04 | 2020-04-06T03:37:04 | 62,348,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,491 | py | import logging
import os
from copy import deepcopy
import simulacra as si
from simulacra.units import *
import ionization as ion
import matplotlib.pyplot as plt
FILE_NAME = os.path.splitext(os.path.basename(__file__))[0]
OUT_DIR = os.path.join(os.getcwd(), "out", FILE_NAME)
if __name__ == "__main__":
with si.utils.LogManager(
"simulacra", "ionization", stdout_logs=True, stdout_level=logging.DEBUG
) as logger:
anim_kwargs = dict(length=10, target_dir=OUT_DIR)
epot_axman = animation.animators.ElectricPotentialPlotAxis(
show_electric_field=True,
show_vector_potential=False,
show_y_label=False,
show_ticks_right=True,
)
test_state_axman = animation.animators.TestStateStackplotAxis(
states=tuple(
ion.HydrogenBoundState(n, l) for n in range(5) for l in range(n)
)[:8]
)
wavefunction_axman = animation.animators.WavefunctionStackplotAxis(
states=(
ion.HydrogenBoundState(1, 0),
ion.HydrogenBoundState(2, 0),
ion.HydrogenBoundState(3, 1),
)
)
animators = [
animation.animators.PolarAnimator(
postfix="g2",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
shading="flat"
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(test_state_axman),
axman_colorbar=animation.animators.ColorBarAxis(),
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(test_state_axman),
axman_colorbar=None,
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g_angmom",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=animation.animators.AngularMomentumDecompositionAxis(
maximum_l=10
),
axman_colorbar=None,
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g_wavefunction",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(wavefunction_axman),
axman_colorbar=None,
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g_wavefunction_again",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(wavefunction_axman),
axman_colorbar=None,
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g_wavefunction_again_hires",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(wavefunction_axman),
axman_colorbar=None,
fig_dpi_scale=2,
**anim_kwargs,
),
]
sim = ion.SphericalHarmonicSpecification(
"sph_harm",
time_initial=0 * asec,
time_final=100 * asec,
r_bound=50 * bohr_radius,
l_bound=20,
r_points=200,
electric_potential=ion.potentials.Rectangle(
start_time=25 * asec,
end_time=75 * asec,
amplitude=1 * atomic_electric_field,
),
# test_states = (ion.HydrogenBoundState(n, l) for n in range(5) for l in range(n)),
use_numeric_eigenstates=True,
numeric_eigenstate_max_energy=10 * eV,
numeric_eigenstate_max_angular_momentum=5,
animators=animators,
).to_sim()
sim.info().log()
sim.run()
sim.info().log()
| [
"[email protected]"
] | |
72dff18867a5ecc45e8a6feb50567cf3be592ed6 | 6c951ca04d6c0db92b05972d651d370302d98a2c | /tests/test_sensitivity_analyzer.py | 35a1db44b33b09a91687ae8644cb8603a1c9727c | [
"MIT"
] | permissive | nickderobertis/sensitivity | 9309bba0aadbac6e8dba09e7c7b1477d063a6d6d | 8f0d0e676213772bdb8cbc8c6fc08fdba6dc6b53 | refs/heads/master | 2023-02-23T20:33:45.118907 | 2022-10-09T01:17:01 | 2022-10-09T01:17:01 | 239,607,375 | 12 | 0 | MIT | 2023-02-11T02:07:41 | 2020-02-10T20:33:30 | Jupyter Notebook | UTF-8 | Python | false | false | 2,995 | py | import uuid
from pandas.testing import assert_frame_equal
from sensitivity import SensitivityAnalyzer
from tests.base import EXPECT_DF_TWO_VALUE, SENSITIVITY_VALUES_TWO_VALUE, add_5_to_values, RESULT_NAME, \
SENSITIVITY_VALUES_THREE_VALUE, add_10_to_values, EXPECT_DF_THREE_VALUE, assert_styled_matches, \
DF_STYLED_NUM_FMT_PATH, assert_graph_matches, PLOT_THREE_PATH, PLOT_OPTIONS_PATH, TWO_VALUE_LABELS, DF_LABELED_PATH
class TestSensitivityAnalyzer:
def create_sa(self, **kwargs) -> SensitivityAnalyzer:
sa_config = dict(
sensitivity_values=SENSITIVITY_VALUES_TWO_VALUE,
func=add_5_to_values,
result_name=RESULT_NAME
)
sa_config.update(**kwargs)
sa = SensitivityAnalyzer(**sa_config)
return sa
def test_create(self):
sa = self.create_sa()
def test_create_df(self):
sa = self.create_sa()
assert_frame_equal(sa.df, EXPECT_DF_TWO_VALUE, check_dtype=False)
def test_create_df_three_values(self):
sa = self.create_sa(
sensitivity_values=SENSITIVITY_VALUES_THREE_VALUE,
func=add_10_to_values,
)
assert_frame_equal(sa.df, EXPECT_DF_THREE_VALUE, check_dtype=False)
def test_create_styled_dfs(self):
sa = self.create_sa()
result = sa.styled_dfs()
assert_styled_matches(result)
def test_create_styled_dfs_with_num_fmt(self):
sa = self.create_sa(num_fmt='${:,.0f}')
result = sa.styled_dfs()
sa2 = self.create_sa()
result2 = sa2.styled_dfs(num_fmt='${:,.0f}')
assert_styled_matches(result, DF_STYLED_NUM_FMT_PATH)
assert_styled_matches(result2, DF_STYLED_NUM_FMT_PATH)
def test_create_styled_dfs_with_labels(self):
sa = self.create_sa(labels=TWO_VALUE_LABELS)
result = sa.styled_dfs()
assert_styled_matches(result, DF_LABELED_PATH)
def test_create_styled_dfs_three_values(self):
sa = self.create_sa(
sensitivity_values=SENSITIVITY_VALUES_THREE_VALUE,
func=add_10_to_values,
)
result = sa.styled_dfs()
def test_create_plot(self):
sa = self.create_sa()
result = sa.plot()
assert_graph_matches(result)
def test_create_plot_three_values(self):
sa = self.create_sa(
sensitivity_values=SENSITIVITY_VALUES_THREE_VALUE,
func=add_10_to_values,
)
result = sa.plot()
assert_graph_matches(result, file_path=PLOT_THREE_PATH)
def test_create_plot_with_options(self):
options = dict(
grid_size=2, color_map='viridis', reverse_colors=True
)
sa = self.create_sa(labels=TWO_VALUE_LABELS, **options)
result = sa.plot()
assert_graph_matches(result, file_path=PLOT_OPTIONS_PATH)
sa = self.create_sa(labels=TWO_VALUE_LABELS)
result = sa.plot(**options)
assert_graph_matches(result, file_path=PLOT_OPTIONS_PATH)
| [
"[email protected]"
] | |
0497e0262a8ee739513125f73d20dec716f79060 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/cylicRot_20200714234806.py | 755b17fab1acf221b7f045ba530fc306bc41432f | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | # given an array rotate it k times to the right
def rotate(arr,k):
# first I'd rotate the array once
# so how do we rotate the array
# we move the last element to the firs place and
# the rest follow suit
# [1,2,3,4]
# [4,2,3,1]
# [4,1,3,2]
# [4,1,2,3]
# [4,1,2,3]
# all we are doing is swapping the elements
newArr = []
for i in range(len(arr)):
k = len(arr) - 1
print('k',k,'i',i)
arr[i],arr[k] = arr[k],arr[i]
print(arr)
rotate([1,2,3,4],4)
| [
"[email protected]"
] | |
e615006a23c81dc60b0a5cdc99d864b0a4c4a7d4 | c724fad90be2e221cb0f5c0005ebcfbdfdb35d27 | /backend/fitsii_19945/wsgi.py | cfa9f31b691c6399a7797d950bc243dc2bb70ac9 | [] | no_license | crowdbotics-apps/fitsii-19945 | d461349a510febd39f4edcaeb2b8b722664e3bf0 | 040621b4053e58b9c323ef7222a6a36465c4806e | refs/heads/master | 2022-12-07T18:18:50.580128 | 2020-09-02T16:56:11 | 2020-09-02T16:56:11 | 292,342,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for fitsii_19945 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is built
# (setdefault keeps any value already present in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fitsii_19945.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| [
"[email protected]"
] | |
c5968b91f1e8556b70007f764784c56df35cfef6 | 2c89037666a3c3c9be55b53055c73aa9fcbde2b7 | /webrobot/app/main/service/user_service.py | 1aa181ef641092046126c96166d66c61d9b54523 | [
"MIT"
] | permissive | kakawaa/Auto-Test-System | 844284de1eb5fac8fa8c5318371c99991caff62d | 76b0690e4e49769ec5d6e65ab6c499396880c0bd | refs/heads/master | 2020-06-17T11:42:38.121124 | 2019-07-05T03:32:39 | 2019-07-05T03:32:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | # import uuid
import datetime
import os
from pathlib import Path
from app.main import db
from app.main.model.database import User, Organization
from task_runner.runner import start_threads
from ..config import get_config
from ..util.errors import *
from ..util.identicon import *
# Root directory that holds one sub-directory per registered user.
USERS_ROOT = Path(get_config().USERS_ROOT)
def save_new_user(data, admin=None):
    """Register a new user and return an ``(payload, status)`` pair.

    data: dict with at least 'email' and 'password'; 'username', 'roles',
        'avatar' and 'introduction' are optional.
    admin: when falsy, a personal organization is also created and the
        user's runner threads are started.

    Returns an auth-token payload with 201 on success, or an error payload
    with 401 (validation / filesystem failure) or 409 (duplicate email).
    """
    user = User.objects(email=data['email']).first()
    if not user:
        new_user = User(
            # public_id=str(uuid.uuid4()),
            email=data['email'],
            name=data.get('username', ''),
            registered_on=datetime.datetime.utcnow(),
            roles=data.get('roles', ['admin']),
            avatar=data.get('avatar', ''),
            introduction=data.get('introduction', '')
        )
        # Assigned separately -- presumably routed through the model's
        # password setter (hashing); TODO confirm in the User model.
        new_user.password = data['password']
        try:
            new_user.save()
        except Exception as e:
            print(e)
            return error_message(EINVAL, 'Field validating for User failed'), 401
        # Per-user workspace on disk, keyed by the email address.
        user_root = USERS_ROOT / data['email']
        try:
            os.mkdir(user_root)
        except FileExistsError as e:
            return error_message(EEXIST), 401
        try:
            os.mkdir(user_root / 'test_results')
        except FileExistsError as e:
            return error_message(EEXIST), 401
        # No avatar supplied: render a deterministic identicon from the email.
        if new_user.avatar == '':
            img = render_identicon(hash(data['email']), 27)
            img.save(user_root / ('%s.png' % new_user.id))
            new_user.avatar = '%s.png' % new_user.id
        # Default the display name to the local part of the email address.
        if new_user.name == '':
            new_user.name = new_user.email.split('@')[0]
        if not admin:
            # Non-admin signups get a personal organization of their own.
            organization = Organization(name='Personal')
            organization.owner = new_user
            organization.path = new_user.email
            organization.save()
            new_user.organizations = [organization]
            new_user.save()
            start_threads(new_user)
        return generate_token(new_user)
    else:
        return error_message(USER_ALREADY_EXIST), 409
def get_all_users():
    """Return a queryset of every registered user."""
    return User.objects()
def get_a_user(user_id):
    """Return the user with primary key ``user_id`` (``first()`` yields
    None when there is no match)."""
    return User.objects(pk=user_id).first()
def generate_token(user):
    """Build an auth token for ``user`` and wrap it in an API response.

    Returns ``(payload, status)``: the token payload with 201 on success,
    or an UNKNOWN_ERROR payload with 401 when token generation fails.
    """
    try:
        encoded = User.encode_auth_token(str(user.id))
        payload = error_message(SUCCESS, token=encoded.decode())
        return payload, 201
    except Exception as exc:
        print(exc)
        return error_message(UNKNOWN_ERROR), 401
| [
"[email protected]"
] | |
56a9016f9048bf93ced9d3230e3e07125c5674b2 | 01bd00e6498190aac53210689c111d72018956fa | /companies/migrations/0047_auto_20190917_1011.py | a0c9fdef406a96c4ea5f7cbf5a40000ea2755162 | [] | no_license | dchaplinsky/edrdr | 0494b31fe3a0ce54d0cf087fb11ef709cb002810 | e9fd5295f8c7ca7db81fce2427456e779ff6637e | refs/heads/master | 2022-06-01T07:01:59.049162 | 2020-10-12T08:04:42 | 2020-10-12T08:04:42 | 122,268,695 | 0 | 1 | null | 2022-04-22T20:52:45 | 2018-02-20T23:14:48 | CSS | UTF-8 | Python | false | false | 571 | py | # Generated by Django 2.2.3 on 2019-09-17 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds two nullable columns
    # (charter_capital, reg_date) to the companyrecord table.
    dependencies = [
        ('companies', '0046_pepowner_person_type'),
    ]
    operations = [
        migrations.AddField(
            model_name='companyrecord',
            name='charter_capital',
            field=models.FloatField(default=None, null=True),
        ),
        migrations.AddField(
            model_name='companyrecord',
            name='reg_date',
            field=models.DateField(null=True),
        ),
    ]
| [
"[email protected]"
] | |
62468571196349acaac805658ec61d5532fcb955 | dc4a42ad81013a1fdaa0c6be0559504e17bacb7e | /products/admin.py | a845d9021b184ff03ccdeed387467a77c73d2d28 | [] | no_license | deone/eqsupply | 15afbda692779431357d2c69475da8503c4728b1 | 3af726b65c1658d364c6485ad36ef98d5c6e7fc3 | refs/heads/master | 2020-04-20T05:29:53.020966 | 2010-05-13T09:16:18 | 2010-05-13T09:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | from django.contrib import admin
from eqsupply.products.models import *
# Expose the product-catalogue models in the Django admin with the
# default ModelAdmin options.
admin.site.register(Division)
admin.site.register(Category)
admin.site.register(Product)
admin.site.register(Accessory)
admin.site.register(ProductVariant)
| [
"[email protected]"
] | |
07fed4cb0ac0a9c9fe7cf77a4577b118c598fd1f | 6147d3da9c7f31a658f13892de457ed5a9314b22 | /multithreading/without_threading.py | 4f637839a61975629dea515f930117251368c52c | [] | no_license | ashish-bisht/must_do_geeks_for_geeks | 17ba77608eb2d24cf4adb217c8e5a65980e85609 | 7ee5711c4438660db78916cf876c831259109ecc | refs/heads/master | 2023-02-11T22:37:03.302401 | 2021-01-03T05:53:03 | 2021-01-03T05:53:03 | 320,353,079 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import threading
import time
start = time.perf_counter()
def working_on_something():
    # Simulated blocking work: announce, sleep one second, announce again.
    print("Sleeping for a sec")
    time.sleep(1)
    print("Woke up")
# Run the task twice back to back; without threads the sleeps cannot
# overlap, so total elapsed time is ~2s.  (The `threading` import above is
# unused here -- presumably this file is the sequential baseline of a demo.)
working_on_something()
working_on_something()
finish = time.perf_counter()
print("total time taken is ", finish - start)
| [
"[email protected]"
] | |
9ab7745e8b4d48edd0fe67af3de20eca60454dcc | f59a3641f488dd40b0af4c0024a252170ab59998 | /chap4/p35.py | d89dca31848be92a9ad88a15209c75b1fe2ad076 | [] | no_license | ujiuji1259/NLP100 | 478a5276514d2f21ac5ee5ec9b50f00dcba67d1a | c19f9ba00eec108dbc93d4cb7d33e86f539d3397 | refs/heads/master | 2023-04-01T23:05:14.376652 | 2021-04-13T05:21:37 | 2021-04-13T05:21:37 | 255,311,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # mecab neko.txt > neko.txt.mecab
from p30 import load_mecab_output
import collections
if __name__ == '__main__':
    # Parse the MeCab output produced by: mecab neko.txt > neko.txt.mecab
    lines = load_mecab_output('neko.txt.mecab')
    # Flatten the nested token lists into one list of surface forms.
    lines = [l['surface'] for line in lines for l in line]
    # Word-frequency table, most frequent first.
    counter = collections.Counter(lines)
    print(counter.most_common())
| [
"[email protected]"
] | |
f0ec9069cd636274166bcd07ca0cebc104ee447b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03598/s680963277.py | c8861d19ff2e2ce27d5b6a660a4fb273c93d87c7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | N = int(input())
K = int(input())
x = list(map(int, input().split()))
A=[]
B=[]
for i in range(len(x)):
a = min(2*(x[i]), 2*abs(K-x[i]))
A.append(a)
print(sum(A)) | [
"[email protected]"
] | |
0d843d4556bf97c40beacc40c239357fa08e4b8a | 05263538c3ad0f577cdbbdb9bac87dcf450230ce | /alexa/ask-sdk/ask_sdk_dynamodb/__version__.py | 5cfdf120d47b16330d48f329ae8c0e26ce048100 | [] | no_license | blairharper/ISS-GoogleMap-project | cea027324fc675a9a309b5277de99fc0265dcb80 | 3df119036b454a0bb219af2d703195f4154a2471 | refs/heads/master | 2020-03-21T16:47:21.046174 | 2018-10-24T08:05:57 | 2018-10-24T08:05:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | #
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
# Packaging metadata for the DynamoDB persistence adapter distribution
# (presumably consumed by setup.py -- confirm against the package layout).
__pip_package_name__ = 'ask-sdk-dynamodb-persistence-adapter'
__description__ = (
    'The ASK SDK DynamoDB Persistence Adapter package provides DynamoDB '
    'Adapter, that can be used with ASK SDK Core, for persistence management')
__url__ = 'http://developer.amazon.com/ask'
__version__ = '0.1'
__author__ = 'Alexa Skills Kit'
__author_email__ = '[email protected]'
__license__ = 'Apache 2.0'
__keywords__ = ['ASK SDK', 'Alexa Skills Kit', 'Alexa', 'ASK SDK Core',
                'Persistence', 'DynamoDB']
__install_requires__ = ["boto3", "ask-sdk-core"]
| [
"[email protected]"
] | |
d07d964851d7ea84722cc1c566fdb976f5049c0a | 10d98fecb882d4c84595364f715f4e8b8309a66f | /non_semantic_speech_benchmark/distillation/train_keras_test.py | 58293b999787e89c984afb7ffed56dbb033ecc48 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 3,089 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for non_semantic_speech_benchmark.eval_embedding.keras.train_keras."""
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import tensorflow as tf
from non_semantic_speech_benchmark.distillation import train_keras
def _get_data(*args, **kwargs):
  """Test stub for the data loader: an infinite dataset of zeros/ones.

  Asserts the keyword arguments the real loader relies on are present,
  then returns repeated (samples, targets) batches of shape
  (batch_size, 16000) and (batch_size, 10).
  """
  del args
  assert 'samples_key' in kwargs
  assert 'min_length' in kwargs
  assert 'batch_size' in kwargs
  bs = kwargs['batch_size']
  samples = tf.zeros((bs, 16000), tf.float32)
  targets = tf.ones([bs, 10], tf.float32)
  return tf.data.Dataset.from_tensors((samples, targets)).repeat()
class TrainKerasTest(parameterized.TestCase):
  """Smoke tests for model construction, train steps and the full flow."""

  @parameterized.parameters(
      {'bottleneck_dimension': 3, 'alpha': 1.0},
      {'bottleneck_dimension': 5, 'alpha': 0.5},
  )
  def test_get_model(self, bottleneck_dimension, alpha):
    """Runs two train steps on a tiny debug model; the step must advance."""
    batched_samples = tf.zeros([3, 16000])
    output_dimension = 10
    targets = tf.ones([3, output_dimension])
    model = train_keras.models.get_keras_model(
        f'mobilenet_debug_{alpha}_False',
        bottleneck_dimension=bottleneck_dimension,
        output_dimension=output_dimension)
    loss_obj = tf.keras.losses.MeanSquaredError()
    opt = tf.keras.optimizers.Adam()
    train_loss = tf.keras.metrics.MeanSquaredError()
    train_mae = tf.keras.metrics.MeanAbsoluteError()
    summary_writer = tf.summary.create_file_writer(
        absltest.get_default_test_tmpdir())
    train_step = train_keras.get_train_step(
        model, loss_obj, opt, train_loss, train_mae, summary_writer)
    # The optimizer's iteration counter doubles as the global step.
    gstep = opt.iterations
    train_step(batched_samples, targets, gstep)
    self.assertEqual(1, gstep)
    train_step(batched_samples, targets, gstep)
    self.assertEqual(2, gstep)

  @mock.patch.object(train_keras.get_data, 'get_data', new=_get_data)
  @mock.patch.object(train_keras.hub, 'load')
  @flagsaver.flagsaver
  def test_full_flow(self, mock_load):
    """End-to-end run with stubbed data loader and mocked hub.load."""
    del mock_load
    flags.FLAGS.file_pattern = 'dummy'
    flags.FLAGS.teacher_model_hub = 'dummy'
    flags.FLAGS.output_key = 'dummmy'
    flags.FLAGS.bottleneck_dimension = 2
    flags.FLAGS.output_dimension = 10
    flags.FLAGS.shuffle_buffer_size = 4
    flags.FLAGS.samples_key = 'audio'
    flags.FLAGS.logdir = absltest.get_default_test_tmpdir()
    train_keras.train_and_report(debug=True)
if __name__ == '__main__':
  # The tests rely on TF2 eager execution; fail fast if it is not active.
  tf.compat.v2.enable_v2_behavior()
  assert tf.executing_eagerly()
  absltest.main()
| [
"[email protected]"
] | |
ba106a98267a6ec0d424113b2870654dbf4698b9 | 3154e6d1a9e9e9919cae75570969da36c45429d7 | /codigo/tutorial/tut0C_camara.py | 9e54589237d6c51292d941cdce95c822a95243c0 | [] | no_license | javacasm/TutorialPyGame | 0d458c7155794668fc1464c466e4d740b3ac77ee | baeb7ce5dda151f8093e39f8b14182a8ee5de926 | refs/heads/master | 2021-07-25T20:01:04.504958 | 2021-05-10T12:33:26 | 2021-05-10T12:33:26 | 250,080,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | https://www.pygame.org/docs/tut/CameraIntro.html
```python
class Capture(object):
    """Minimal pygame camera loop: grab frames and blit them to a window.

    NOTE(review): this tutorial snippet assumes ``pygame``, ``pygame.camera``
    and the ``pygame.locals`` names (QUIT, KEYDOWN, K_ESCAPE) are already
    imported and that ``pygame.camera.init()`` has been called.
    """
    def __init__(self):
        self.size = (640,480)
        # create a display surface. standard pygame stuff
        self.display = pygame.display.set_mode(self.size, 0)
        # this is the same as what we saw before
        self.clist = pygame.camera.list_cameras()
        if not self.clist:
            raise ValueError("Sorry, no cameras detected.")
        self.cam = pygame.camera.Camera(self.clist[0], self.size)
        self.cam.start()
        # create a surface to capture to. for performance purposes
        # bit depth is the same as that of the display surface.
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)
    def get_and_flip(self):
        # if you don't want to tie the framerate to the camera, you can check
        # if the camera has an image ready. note that while this works
        # on most cameras, some will never return true.
        if self.cam.query_image():
            self.snapshot = self.cam.get_image(self.snapshot)
        # blit it to the display surface. simple!
        self.display.blit(self.snapshot, (0,0))
        pygame.display.flip()
    def main(self):
        going = True
        while going:
            events = pygame.event.get()
            for e in events:
                if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
                    # close the camera safely
                    self.cam.stop()
                    going = False
            # redraw on every pass through the loop, even with no events
            self.get_and_flip()
``` | [
"[email protected]"
] | |
f62111deb74e279775448c7d5a97f5ea7f6a8255 | 9f835d53232e954805b7ed1d93889e409209b36b | /1541_복습.py | 134932438e9def1182112113c24eb401c83df29d | [] | no_license | dmswl0311/Baekjoon | 7c8a862fceff086b3d7740eef23b80164e1d5aeb | 22040aff6b64d5081e86d91b0d118d1a718a4316 | refs/heads/master | 2023-04-29T13:48:51.448245 | 2021-05-26T14:35:32 | 2021-05-26T14:35:32 | 323,482,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | s = input().split('-')
sum = 0
result = []
for i in s:
if '+' in i:
a = i.split('+')
for j in a:
sum += int(j)
result.append(sum)
else:
result.append(int(i))
minus = result[0]
for i in range(1, len(result)):
minus -= result[i]
print(minus)
| [
"[email protected]"
] | |
bcdcdba6ff316a16065b95a2bba284abc290a417 | 9d25d1205da84db33bc425266bc3021cd7529cb1 | /digitalearthau/testing/plugin.py | b73fdee1aba0e11cd5d8c9a183a595c1b7c6e754 | [] | no_license | GeoscienceAustralia/digitalearthau | 9068970b2794a4ac55a34f910caa5877b548bb37 | 4cf486eb2a93d7de23f86ce6de0c3af549fe42a9 | refs/heads/develop | 2023-06-22T14:31:41.516829 | 2022-11-14T05:22:05 | 2022-11-14T05:22:05 | 51,411,119 | 31 | 21 | null | 2023-06-14T06:36:31 | 2016-02-10T00:16:36 | Python | UTF-8 | Python | false | false | 2,081 | py | import itertools
import os
import pytest
from pathlib import Path
from typing import Iterable
import datacube
import digitalearthau
import digitalearthau.system
from datacube.config import LocalConfig
from . import factories
# These are unavoidable in pytests due to fixtures
# pylint: disable=redefined-outer-name,protected-access,invalid-name
# Prefer the C-accelerated YAML loader when libyaml is available.
# NOTE(review): SafeLoader is not referenced in this chunk -- presumably
# used elsewhere in the module, or vestigial.
try:
    from yaml import CSafeLoader as SafeLoader
except ImportError:
    from yaml import SafeLoader
# The default test config options.
# The user overrides these by creating their own file in ~/.datacube_integration.conf
INTEGRATION_DEFAULT_CONFIG_PATH = Path(__file__).parent.joinpath('testing-default.conf')
def pytest_report_header(config):
    """Add package versions to pytest's report header when run verbosely."""
    if config.getoption('verbose') <= 0:
        return None
    return (
        f"digitaleathau {digitalearthau.__version__}, "
        f"opendatacube {datacube.__version__}"
    )
@pytest.fixture(scope='session')
def integration_config_paths():
    """Config files used by the integration tests, in override order:
    the built-in defaults first, then the user's optional
    ~/.datacube_integration.conf."""
    if not INTEGRATION_DEFAULT_CONFIG_PATH.exists():
        # Safety check. We never want it falling back to the default config,
        # as it will alter/wipe the user's own datacube to run tests
        raise RuntimeError(
            'Integration default file not found. This should be built-in?')
    return (
        str(INTEGRATION_DEFAULT_CONFIG_PATH),
        os.path.expanduser('~/.datacube_integration.conf')
    )
@pytest.fixture(scope='session')
def global_integration_cli_args(integration_config_paths: Iterable[str]):
    """
    The first arguments to pass to a cli command for integration test configuration.
    """
    # One ('--config_file', <path>) pair per file, flattened, in file order.
    cli_args = []
    for config_path in integration_config_paths:
        cli_args.extend(('--config_file', config_path))
    return cli_args
@pytest.fixture(scope='session')
def local_config(integration_config_paths):
    """datacube LocalConfig assembled from the integration config files."""
    return LocalConfig.find(integration_config_paths)
# Default fixtures which will drop/create on every individual test function.
# Each layer builds on the previous: local_config -> db -> index -> dea_index.
db = factories.db_fixture('local_config')
index = factories.index_fixture('db')
dea_index = factories.dea_index_fixture('index')
| [
"[email protected]"
] | |
d1ddaf333839d2b4c77c8c4265b2240ac9836035 | 8d6fa96da4220ba886ef8e858f1925b6dca34e58 | /examples/wtf/wtf/config.py | 7cf539ff078f59cb14f772090950734c0d091acb | [] | no_license | FZambia/cyclone-wtforms | 6ee26c920171685e027529e8f1fbb99c765edc30 | c266b5f3bfff77e3a721b3335b74a294966f7daf | refs/heads/master | 2016-09-05T15:23:08.336180 | 2012-10-05T18:55:00 | 2012-10-05T18:55:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,041 | py | # coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ConfigParser
from cyclone.util import ObjectDict
def xget(func, section, option, default=None):
    """Best-effort config lookup: return ``func(section, option)``, or
    ``default`` when the getter raises.

    Used for optional settings where a missing section/option (or a badly
    typed value) should fall back instead of aborting startup.
    """
    try:
        return func(section, option)
    # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
    # Exception still covers NoSectionError/NoOptionError/ValueError.
    except Exception:
        return default
def parse_config(filename):
    """Parse the server's INI config file into a settings dict.

    (Python 2 era code: uses the old ``ConfigParser`` module and the
    deprecated ``readfp``.)

    Returns a dict with web-server options, absolute frontend paths, and
    optional ``sqlite_settings`` / ``redis_settings`` / ``mysql_settings``
    ObjectDicts -- each None unless its section is enabled.  The raw
    parser is kept under the ``raw`` key.
    """
    cfg = ConfigParser.RawConfigParser()
    with open(filename) as fp:
        cfg.readfp(fp)
        # Redundant: the with-block closes fp anyway.
        fp.close()
    settings = {'raw': cfg}
    # web server settings
    settings["debug"] = xget(cfg.getboolean, "server", "debug", False)
    settings["xheaders"] = xget(cfg.getboolean, "server", "xheaders", False)
    # cookie_secret is mandatory: no xget fallback, a missing value raises.
    settings["cookie_secret"] = cfg.get("server", "cookie_secret")
    settings["xsrf_cookies"] = xget(cfg.getboolean, "server", "xsrf_cookies",
                                    False)
    # get project's absolute path
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    # Resolve a configured relative path against the project root.
    getpath = lambda k, v: os.path.join(root, xget(cfg.get, k, v))
    # locale, template and static directories' path
    settings["locale_path"] = getpath("frontend", "locale_path")
    settings["static_path"] = getpath("frontend", "static_path")
    settings["template_path"] = getpath("frontend", "template_path")
    # sqlite support
    if xget(cfg.getboolean, "sqlite", "enabled", False):
        settings["sqlite_settings"] = ObjectDict(database=cfg.get("sqlite",
                                                                  "database"))
    else:
        settings["sqlite_settings"] = None
    # redis support
    if xget(cfg.getboolean, "redis", "enabled", False):
        settings["redis_settings"] = ObjectDict(
            host=cfg.get("redis", "host"),
            port=cfg.getint("redis", "port"),
            dbid=cfg.getint("redis", "dbid"),
            poolsize=cfg.getint("redis", "poolsize"))
    else:
        settings["redis_settings"] = None
    # mysql support
    if xget(cfg.getboolean, "mysql", "enabled", False):
        settings["mysql_settings"] = ObjectDict(
            host=cfg.get("mysql", "host"),
            port=cfg.getint("mysql", "port"),
            username=xget(cfg.get, "mysql", "username"),
            password=xget(cfg.get, "mysql", "password"),
            database=xget(cfg.get, "mysql", "database"),
            poolsize=xget(cfg.getint, "mysql", "poolsize", 10),
            debug=xget(cfg.getboolean, "mysql", "debug", False))
    else:
        settings["mysql_settings"] = None
    return settings
| [
"[email protected]"
] | |
128efb9b492a29c2e87a97b932e626a724b6af9f | 52b9016932aa426eeaaade5d856af6a1a771683f | /tests/testapp/serializers.py | 3c4be81a47c21da377120bda5b7ee7eb6deb647d | [
"MIT"
] | permissive | marlncpe/django-rest-pandas | 33033627d88c6467a9677133402fb519d5ea5a75 | 89a93c3ce8d30688f9137f5a9beacc7d63f621e0 | refs/heads/master | 2021-01-23T11:55:02.722962 | 2017-09-01T20:47:46 | 2017-09-01T20:47:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | from rest_framework.serializers import ModelSerializer
from rest_framework import serializers
from rest_pandas import PandasUnstackedSerializer
from .models import TimeSeries, MultiTimeSeries, ComplexTimeSeries
class TimeSeriesSerializer(ModelSerializer):
    """Serialize TimeSeries rows with every model field."""
    # format=None -> DRF emits datetime.date objects instead of strings.
    date = serializers.DateField(format=None)
    class Meta:
        model = TimeSeries
        fields = '__all__'
class TimeSeriesNoIdSerializer(TimeSeriesSerializer):
    """Same as TimeSeriesSerializer but without the ``id`` column."""
    class Meta:
        model = TimeSeries
        exclude = ['id']
class MultiTimeSeriesSerializer(ModelSerializer):
    """MultiTimeSeries rows plus the pandas_* hints read by rest_pandas."""
    class Meta:
        model = MultiTimeSeries
        exclude = ['id']
        # Options consumed by the rest_pandas serializers/renderers:
        pandas_index = ['date']
        pandas_unstacked_header = ['series']
        pandas_scatter_coord = ['series']
        pandas_boxplot_group = 'series'
        pandas_boxplot_date = 'date'
class ComplexTimeSeriesSerializer(ModelSerializer):
    """ComplexTimeSeries with a two-level index and three-level header."""
    class Meta:
        model = ComplexTimeSeries
        exclude = ['id']
        pandas_index = ['date', 'type']
        pandas_unstacked_header = ['site', 'parameter', 'units']
class ComplexScatterSerializer(ComplexTimeSeriesSerializer):
    """Scatter-plot variant; additionally drops the ``flag`` column."""
    # Meta inherits model and pandas_index from the parent's Meta.
    class Meta(ComplexTimeSeriesSerializer.Meta):
        exclude = ['id', 'flag']
        pandas_scatter_coord = ['units', 'parameter']
        pandas_scatter_header = ['site']
class ComplexBoxplotSerializer(ComplexTimeSeriesSerializer):
    """Boxplot variant; drops ``flag`` and ``type`` as well."""
    class Meta(ComplexTimeSeriesSerializer.Meta):
        exclude = ['id', 'flag', 'type']
        pandas_boxplot_group = 'site'
        pandas_boxplot_date = 'date'
        pandas_boxplot_header = ['units', 'parameter']
class NotUnstackableSerializer(ModelSerializer):
    """Deliberately misconfigured: unstacked serializer with no header.

    pandas_unstacked_header is intentionally left out (see comment below),
    presumably so tests can exercise the resulting error path.
    """
    class Meta:
        model = MultiTimeSeries
        fields = '__all__'
        list_serializer_class = PandasUnstackedSerializer
        # pandas_unstacked_header = Missing
        pandas_index = ['series']
| [
"[email protected]"
] | |
626e284b40ec0447bfcba31a165d86827eb7df2a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /gHrMmA7emP6CFAMnb_6.py | 35eeb43f5be552b55e650249bf1ff464b8e37754 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py |
def is_apocalyptic(n):
    """Classify 2**n by how many times '666' occurs in its decimal digits.

    0 occurrences -> "Safe", 1 -> "Single", 2 -> "Double", 3 -> "Triple".
    (As in the original, four or more occurrences yield None.)
    """
    # str.count counts non-overlapping occurrences, exactly matching the
    # original len(split('666')) - 1 approach.
    occurrences = str(2 ** n).count('666')
    labels = {0: "Safe", 1: "Single", 2: "Double", 3: "Triple"}
    return labels.get(occurrences)
| [
"[email protected]"
] | |
4e8d14003c2e112ef076b89c4c8a3ad6613f9a2c | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/customization_failed.py | b63b14e03d5fddb6d06ae4f32d77239d433f8930 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def CustomizationFailed(vim, *args, **kwargs):
    '''The customization sequence in the guest failed.'''
    # Build the raw vim25 data object; properties are attached below.
    obj = vim.client.factory.create('{urn:vim25}CustomizationFailed')
    # do some validation checking...
    # NOTE(review): generated code -- `required` lists 5 names and the guard
    # checks `< 5`, yet the message says "at least 6"; the count and the
    # message disagree.  Any fix belongs in the generator, not here.
    if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 6 arguments got: %d' % len(args))
    required = [ 'template', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'logLocation', 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
    # Positional args map onto required+optional in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)
    # Keyword args must name a known property; anything else is rejected.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
    return obj
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.