blob_id (string, 40) | directory_id (string, 40) | path (string, 3-288) | content_id (string, 40) | detected_licenses (list, 0-112 items) | license_type (2 classes) | repo_name (string, 5-115) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (147 classes) | src_encoding (25 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (142 classes) | content (string, 128 to 8.19k) | authors (list, 1 item) | author_id (string, 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f788150cb65d8a9dd0618a8bae8840a7efe7aac | b788f1f8bfa8949177e28dd4be436572162c418b | /regular expression.py | 5622c6603c31b7c63a0a789938965fc66832786f | [] | no_license | KaziMotiour/pyhton-OOP | bc9506f3afe7686a7451de9a5448c759f3cdcbac | 8e85cbe31809a11293fb90d6e39b2d0293cff9b5 | refs/heads/master | 2022-02-18T09:40:05.274955 | 2019-09-02T17:46:53 | 2019-09-02T17:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | import re
def multi_find(test_patterns, phrase):
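"""Print every match of each regex pattern found in phrase."""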
for pat in test_patterns:
print("Search for pattern {}".format(pat))
print(re.findall(pat,phrase))
print('\n')
patterns = ['Team1', 'Team2']
text = "This is a starting! with Team1, 1234567, not to others arr arrr"
# for pattern in patterns:
# print("I'm searching for: "+pattern)
# if re.search(pattern,text):
# #if pattern in text:
# print("Match")
# else:
# print("Not Match")
# match = re.search('Team1',text)
# print(match.start())
# textSplit = re.split('with', text)
# print(textSplit)
# print(re.findall('a', text))
# test_patterns = ['ar*']
# test_patterns = ['ar+']
# test_patterns = ['ar{2}']
# test_patterns = ['ar{1,2}']
# test_patterns = ['[^!>?]+']
# test_patterns = ['[a-z]+'] # show all the lowercase in text
# test_patterns = ['[A-Z]+'] # show all the uppercase in text
# test_patterns = [r'\d'] # show all the number in text
# test_patterns = [r'\d+'] # show all the number in text
# test_patterns = [r'\D+'] # show all the text except number in text
test_patterns = [r'\w+'] # show all the text alpha numeric in text
multi_find(test_patterns,text) | [
"[email protected]"
] | |
321383aac6ddb384a5de4743a8d8fba4a11a44cc | a6d36a861c156e9dd9c3f4733978f194bcc62c2c | /api/serializers.py | b284259aa9cd0eee350124e29334949953db0bd5 | [] | no_license | wjaccck/upfile | 091f3ba132748cef348ff8a9973eba009e5423fa | 2721cc29ca394ddcf9f415e4fba7e2b422e87701 | refs/heads/master | 2021-01-01T04:30:18.024584 | 2016-05-26T02:25:51 | 2016-05-26T02:25:51 | 57,368,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | from rest_framework import serializers
from api.models import Up_file,Status,Azure_key,Dirs,Recode_dirs
class Up_fileSerializer(serializers.ModelSerializer):
status = serializers.SlugRelatedField(queryset=Status.objects.all(), slug_field='alias')
base_dir = serializers.SlugRelatedField(queryset=Dirs.objects.all(), slug_field='name')
class Meta:
model = Up_file
fields = ('url', 'id', 'base_dir','blob_name', 'blob_url','file_name', 'file_md5', 'file_location',
'department','status', 'modified_date', 'created_date')
class StatusSerializer(serializers.ModelSerializer):
class Meta:
model = Status
class Azure_keySerializer(serializers.ModelSerializer):
class Meta:
model = Azure_key
class DirsSerializer(serializers.ModelSerializer):
class Meta:
model = Dirs
class Recode_dirsSerializer(serializers.ModelSerializer):
base_dir=serializers.SlugRelatedField(queryset=Dirs.objects.all(), slug_field='name')
sub_dir = serializers.SlugRelatedField(queryset=Dirs.objects.all(), slug_field='name',many=True)
sub_files=serializers.SerializerMethodField()
class Meta:
model = Recode_dirs
fields = ('url', 'id', 'base_dir', 'sub_dir', 'sub_files')
def get_sub_files(self,obj):
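# Collect the files under this record's base_dir; files with status 'upload' get an Azure SAS-signed URL, and a {"total": n} entry is appended last.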
base_dir=obj.base_dir
result=[]
for m in Up_file.objects.filter(base_dir=base_dir):
status = m.status.alias
if status == 'upload':
sig = Azure_key.objects.get(name='azure').sig
url = m.blob_url + '?' + sig
else:
url = ''
data = {}
data['status'] = status
data['file'] = m.file_name
data['url'] = url
data['created_date']=m.created_date
data['department']=m.department
result.append(data)
total={"total":len(result)}
result.append(total)
return result
| [
"[email protected]"
] | |
6b307266c03ec45f6004645eac1d4985b1bfbb4c | d5a5ff1ed1f508c47e9506a552bf44844bcdc071 | /payroll/apps.py | 8313bd0aaa30a056f07efb95e1823ad6458d08af | [] | no_license | sintaxyzcorp/prometeus | 5c9dc20e3c2f33ea6b257b850ff9505621302c47 | 2508603b6692023e0a9e40cb6cd1f08465a33f1c | refs/heads/master | 2021-09-01T09:31:36.868784 | 2017-12-26T07:58:27 | 2017-12-26T07:58:27 | 113,787,842 | 0 | 1 | null | 2017-12-18T08:25:31 | 2017-12-10T22:16:28 | JavaScript | UTF-8 | Python | false | false | 182 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class PayrollConfig(AppConfig):
name = 'payroll'
verbose_name = 'Nomina'
| [
"[email protected]"
] | |
da837fb82085ba56a201b6894220c72ba25ea444 | 38182d45f0b1f6228aeec03a876ee8213404d171 | /questionnaire/admin.py | be8327b50af90b1628c99da556843bb64cf84a85 | [] | no_license | alexzinoviev/MobileDoc | 1283ec5cd52d27510e54f22522b9e1a01b65d8f8 | 66c22f1b8fe96ad5630c3d33bcc26e5d815f48db | refs/heads/master | 2020-06-24T05:29:41.366198 | 2017-08-03T16:37:10 | 2017-08-03T16:37:10 | 96,920,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from django.contrib import admin
from .models import Questionnaire
# Register your models here.
@admin.register(Questionnaire)
class QuestionAdmin(admin.ModelAdmin):
list_display = ('question', 'category')
#admin.site.register(Questionnaire, QuestionAdmin)
# @admin.register(Product)
# class ProductAdmin(admin.ModelAdmin):
# #pass
# prepopulated_fields = {'slug': ('name',)}
# list_display = ('name','desc', 'cost', 'active') | [
"[email protected]"
] | |
51f34c3e0287e316f0918f7bae364df3289de792 | 966ea314bcd64f40bfaea457f914fcedbe26426a | /March-week3/testconversion.py | be419779776866290ed22ba1214ccc83499f7eda | [] | no_license | vandanasen/Python-Projects | 30caa85cf87ba712e1307b0441fed2d7fa9298a0 | 9b24a9f6af0374bb0d6a3a15c05099f49edfd581 | refs/heads/master | 2020-03-26T00:26:06.067905 | 2019-03-11T22:58:25 | 2019-03-11T22:58:25 | 144,320,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | a_list=[1,1,2,3,3]
a = tuple(a_list)
print(a)
b = list(a)
print(len(b))
c = set(b)
print(len(c))
d=list(c)
print(len(d))
e=list(range(1, 11, 1))
print(e)
pairs = dict([(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)])  # avoid shadowing the builtin dict
print(pairs)
t = tuple(pairs.items())
print(t)
v = tuple(pairs.keys())
print(v)
k = tuple(pairs.values())
print(k)
s = "antidisestablishmentarianism"
print(s)
s = sorted(s)
print(s)
s2="".join(s)
print(s2)
w = "the quick brown fox jumped over the lazy dog"
w = w.split()
print(w) | [
"[email protected]"
] | |
7f9cf2b44780a6c73735f0b55eb8a5f232bd2098 | 88e2c87d087e30dedda11cad8a2665e89f6ac32c | /tests/contrib/operators/test_opsgenie_alert_operator.py | 1b4467bc5a523be4b00ce8c701d2f578da10ece5 | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | bigo-sg/airflow | 690805b782d3490c5d01047203ee4766f9695cf0 | e2933fc90d8fd9aeb61402f7a237778553762a17 | refs/heads/master | 2020-05-30T19:25:36.289802 | 2019-07-15T10:14:34 | 2019-07-15T10:14:34 | 189,924,188 | 2 | 1 | Apache-2.0 | 2019-10-18T06:30:14 | 2019-06-03T02:50:51 | Python | UTF-8 | Python | false | false | 4,788 | py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow import DAG, configuration
from airflow.contrib.operators.opsgenie_alert_operator import OpsgenieAlertOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestOpsgenieAlertOperator(unittest.TestCase):
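# Verifies that the operator turns its kwargs into the exact Opsgenie API payload and exposes each kwarg as a property.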
_config = {
'message': 'An example alert message',
'alias': 'Life is too short for no alias',
'description': 'Every alert needs a description',
'responders': [
{'id': '4513b7ea-3b91-438f-b7e4-e3e54af9147c', 'type': 'team'},
{'name': 'NOC', 'type': 'team'},
{'id': 'bb4d9938-c3c2-455d-aaab-727aa701c0d8', 'type': 'user'},
{'username': '[email protected]', 'type': 'user'},
{'id': 'aee8a0de-c80f-4515-a232-501c0bc9d715', 'type': 'escalation'},
{'name': 'Nightwatch Escalation', 'type': 'escalation'},
{'id': '80564037-1984-4f38-b98e-8a1f662df552', 'type': 'schedule'},
{'name': 'First Responders Schedule', 'type': 'schedule'}
],
'visibleTo': [
{'id': '4513b7ea-3b91-438f-b7e4-e3e54af9147c', 'type': 'team'},
{'name': 'rocket_team', 'type': 'team'},
{'id': 'bb4d9938-c3c2-455d-aaab-727aa701c0d8', 'type': 'user'},
{'username': '[email protected]', 'type': 'user'}
],
'actions': ['Restart', 'AnExampleAction'],
'tags': ['OverwriteQuietHours', 'Critical'],
'details': {'key1': 'value1', 'key2': 'value2'},
'entity': 'An example entity',
'source': 'Airflow',
'priority': 'P1',
'user': 'Jesse',
'note': 'Write this down'
}
expected_payload_dict = {
'message': _config['message'],
'alias': _config['alias'],
'description': _config['description'],
'responders': _config['responders'],
'visibleTo': _config['visibleTo'],
'actions': _config['actions'],
'tags': _config['tags'],
'details': _config['details'],
'entity': _config['entity'],
'source': _config['source'],
'priority': _config['priority'],
'user': _config['user'],
'note': _config['note']
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
def test_build_opsgenie_payload(self):
# Given / When
operator = OpsgenieAlertOperator(
task_id='opsgenie_alert_job',
dag=self.dag,
**self._config
)
payload = operator._build_opsgenie_payload()
# Then
self.assertEqual(self.expected_payload_dict, payload)
def test_properties(self):
# Given / When
operator = OpsgenieAlertOperator(
task_id='opsgenie_alert_job',
dag=self.dag,
**self._config
)
self.assertEqual('opsgenie_default', operator.opsgenie_conn_id)
self.assertEqual(self._config['message'], operator.message)
self.assertEqual(self._config['alias'], operator.alias)
self.assertEqual(self._config['description'], operator.description)
self.assertEqual(self._config['responders'], operator.responders)
self.assertEqual(self._config['visibleTo'], operator.visibleTo)
self.assertEqual(self._config['actions'], operator.actions)
self.assertEqual(self._config['tags'], operator.tags)
self.assertEqual(self._config['details'], operator.details)
self.assertEqual(self._config['entity'], operator.entity)
self.assertEqual(self._config['source'], operator.source)
self.assertEqual(self._config['priority'], operator.priority)
self.assertEqual(self._config['user'], operator.user)
self.assertEqual(self._config['note'], operator.note)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a0083cab532c5db426c3e4e1e0041d4f1d5ec536 | 0cfb5831a748ebd46e438e3ad7e7a09c1d196499 | /com/chapter_09/section_04/task_9.4.5_importAllClass.py | 92d4e7f85081ee09dbfc6731f3670ef472dcf5a0 | [] | no_license | StevenGeGe/pythonFromIntroductionToPractice01 | 7cfe8cdb4bc5c0ddbe25b44976231d72d9e10108 | 9d2ba499056b30ded14180e6c4719ee48edd9772 | refs/heads/master | 2023-02-15T04:08:59.878711 | 2020-12-28T13:27:55 | 2020-12-28T13:27:55 | 310,980,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/11/25 20:34
# @Author : Yong
# @Email : [email protected]
# @File : task_9.4.5_importAllClass.py
# @Software: PyCharm
# Importing all classes from a module:
# from module_name import *
# This style is not recommended.
# Prefer accessing classes with the module_name.class_name syntax
| [
"[email protected]"
] | |
babdbff65d7df7830fbc35159f977fcaebc87b48 | 7be7190aeceef43841274518d260bcd92e04e5a7 | /Mahouo-Account/sever/app/__init__.py | a0d44f9b8c1ba491d8fa05edb03452397aa3f1ee | [] | no_license | weivis/Mahouo | 078c440b41a686d355a49e3fc29175bc225dff2c | 81fd6919a884b97cb53ac3e97f1e48d78ddd4e63 | refs/heads/master | 2020-04-20T16:56:44.813853 | 2019-02-03T18:47:11 | 2019-02-03T18:47:11 | 168,974,099 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | __author__ = 'Ran'
from flask import Flask # flask
from flask_cache import Cache # cache
from flask_login import LoginManager
from flask_cors import *
from flask_sqlalchemy import SQLAlchemy # sql
from datetime import timedelta
from app import config # config
# Instantiate the app
app = Flask(__name__,
template_folder='templates', # template path; may be relative or absolute
static_folder='static', # static URL prefix; by default the static folder path matches the prefix
)
# Load the global configuration
app.config.from_object(config)
app.permanent_session_lifetime = timedelta(days=7)
# Secret key used for session signing
app.secret_key = '\x12my\x0bVO\xeb\xf8\x18\x15\xc5_?\x91\xd7h\x06AC'
# Configure Flask-Login
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.account_login'
login_manager.init_app(app=app)
# Bind the extension objects
db = SQLAlchemy(app)
cache = Cache(app)
cache.init_app(app) | [
"[email protected]"
] | |
703dc8683d7f928f96e719bf5febd0627d683364 | 9a9e0398f26cee9864d48c4618c0a482e5475e83 | /Python/code/top_k_frequent_elements.py | 1e0b45c6c2186a3d5aa1760acecb875e104754cb | [] | no_license | CNife/leetcode | 92693c653bb41780ee431293286c3e909009e9b0 | 7cdd61692ecb52dd1613169e80b924dd39d35996 | refs/heads/main | 2021-06-22T21:22:12.997253 | 2021-03-18T07:07:15 | 2021-03-18T07:07:15 | 206,955,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | from collections import defaultdict
from heapq import heappush, heapreplace
from typing import List, Tuple
from leetcode import test, sorted_list
def top_k_frequent(nums: List[int], k: int) -> List[int]:
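# Count frequencies, then keep a size-k min-heap of (count, num) pairs so only the k most frequent survive: O(n log k).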
counter = defaultdict(lambda: 0)
for num in nums:
counter[num] += 1
heap: List[Tuple[int, int]] = []
for num, count in counter.items():
if len(heap) < k:
heappush(heap, (count, num))
elif heap[0][0] < count:
heapreplace(heap, (count, num))
return [t[1] for t in heap]
test(
top_k_frequent,
[
([1, 1, 1, 2, 2, 3], 2, [1, 2]),
([1], 1, [1]),
],
map_func=sorted_list,
)
| [
"[email protected]"
] | |
9c68c1a48ea5e7f1c1e9b39fb95197c685595749 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02260/s279877008.py | 38cfba9d040e317dbba645db4cba4794e44c61a4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | input()
num_list = raw_input().split()
num_list = map(int, num_list)
def selection_sort(num_list, count):
for i in range(0, len(num_list)):
minj = i
for j in range(i, len(num_list)):
if num_list[j] < num_list[minj]:
minj = j
temp = num_list[minj]
if minj != i:
num_list[minj] = num_list[i]
num_list[i] = temp
count += 1
return count, num_list
count = 0
count, num_list = selection_sort(num_list, count)
num_list = map(str, num_list)
print " ".join(num_list)
print count | [
"[email protected]"
] | |
39b8c01806e7f01b801d077e55cdbe99b11dd5a9 | 0883188e1648f982e3a27bf0b89c4c09dac3d3ef | /nmigen/test/compat/test_fifo.py | bc6b81cdee56cf3921aa32628db05c3a8a6097be | [
"BSD-2-Clause"
] | permissive | pbsds/nmigen | b44c0b212ddd2d88a6641243efbb632baacb66f7 | d964ba9cc45490b141c8c4c4c3d8add1a26a739d | refs/heads/master | 2022-12-04T10:32:52.573521 | 2020-07-31T13:17:39 | 2020-07-31T18:41:59 | 286,076,534 | 0 | 0 | BSD-2-Clause | 2020-08-08T16:12:24 | 2020-08-08T16:12:23 | null | UTF-8 | Python | false | false | 1,272 | py | import unittest
from itertools import count
from ...compat import *
from ...compat.genlib.fifo import SyncFIFO
from .support import SimCase
class SyncFIFOCase(SimCase, unittest.TestCase):
class TestBench(Module):
def __init__(self):
self.submodules.dut = SyncFIFO(64, 2)
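# 64-bit wide, depth-2 synchronous FIFO; the two 32-bit halves of din are bumped by 1 and 2 after every accepted write.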
self.sync += [
If(self.dut.we & self.dut.writable,
self.dut.din[:32].eq(self.dut.din[:32] + 1),
self.dut.din[32:].eq(self.dut.din[32:] + 2)
)
]
def test_run_sequence(self):
seq = list(range(20))
def gen():
for cycle in count():
# fire re and we at "random"
yield self.tb.dut.we.eq(cycle % 2 == 0)
yield self.tb.dut.re.eq(cycle % 3 == 0)
# the output if valid must be correct
if (yield self.tb.dut.readable) and (yield self.tb.dut.re):
try:
i = seq.pop(0)
except IndexError:
break
self.assertEqual((yield self.tb.dut.dout[:32]), i)
self.assertEqual((yield self.tb.dut.dout[32:]), i*2)
yield
self.run_with(gen())
| [
"[email protected]"
] | |
0d6bf526fcc135ca7f156726c43622f99a0c3269 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_053/ch160_2020_06_19_19_49_19_966261.py | 0eb7281b995c2be04327db8c79ad0fcf74f9446d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import math
resultados = {}
valores = []
i = 0
for x in range(91):
# Apply the sine approximation formula
y = 4*x*(180 - x)/(40500 - x*(180 - x))
# Convert degrees to radians
x = x*math.pi/180
# Compute the absolute difference from the true sine
dif = abs(y - math.sin(x))
# Append to the list of differences
valores.append(dif)
# Store the difference in the dict, keyed by the angle index
resultados[i] = dif
i += 1
for indice, diferenca in resultados.items():
if diferenca == max(valores):
print(indice)
break | [
"[email protected]"
] | |
cee5b8650269efe733b6f7b95dcc8366a0fa8d3b | ba919c512e131de90427b1a6bfd29e1d7a2e22c8 | /debug/verification_test.py | a509b9d96a2cf195f0bfa7b8082cccaa8b3446a1 | [] | no_license | qq183727918/influence | 7d3b0106db55402630979b86455e4b82ebed2e98 | 75cb04453278d13dd82a6f319d6f9ecdfad5fb88 | refs/heads/master | 2023-01-22T23:00:51.979543 | 2020-12-08T11:12:12 | 2020-12-08T11:12:12 | 317,783,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | # _*_ coding: UTF-8 _*_
# @Time : 2020/12/4 17:01
# @Author : LiuXiaoQiang
# @Site : http:www.cdtest.cn/
# @File : verification_test.py
# @Software : PyCharm
def verification():
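# OCR pre-processing pipeline: load, convert to grayscale, median-filter, boost contrast, then binarize to 1-bit.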
from PIL import Image
# Grayscale conversion
# Load the image from a file path
imgName = '13.png'
im = Image.open(imgName)
# Alternatively, load the image from a byte stream:
# im = Image.open(io.BytesIO(b))
# Convert to a grayscale image
imgry = im.convert('L')
# Save the image
imgry.save('gray-' + imgName)
# Binarization and noise-reduction steps
from PIL import Image, ImageEnhance, ImageFilter
im = Image.open('../verification/gray-13.png')
im = im.filter(ImageFilter.MedianFilter())
enhancer = ImageEnhance.Contrast(im)
im = enhancer.enhance(2)
im = im.convert('1')
im.show()
im.save('./1213.png')
verification()
from PIL import Image
import pytesseract
# pytesseract.pytesseract.tesseract_cmd = r'D:\Tools\tesseract\Tesseract-OCR/tesseract.exe'
image = Image.open("../verification/gray-13.png")
code = pytesseract.image_to_string(image, None)
print(code)
| [
"[email protected]"
] | |
05881bf793aa55eee51c75d99cdbe7a1085333a9 | 86fc644c327a8d6ea66fd045d94c7733c22df48c | /scripts/managed_cpe_services/customer/qos_service/policy_class_map_update/update_policy_class_map/update_policy_class_map.py | d79055588ad5e2c89764db215cca6b39ed2e3bd7 | [] | no_license | lucabrasi83/anutacpedeployment | bfe703657fbcf0375c92bcbe7560051817f1a526 | 96de3a4fd4adbbc0d443620f0c53f397823a1cad | refs/heads/master | 2021-09-24T16:44:05.305313 | 2018-10-12T02:41:18 | 2018-10-12T02:41:18 | 95,190,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,843 | py | #
# This computer program is the confidential information and proprietary trade
# secret of Anuta Networks, Inc. Possessions and use of this program must
# conform strictly to the license agreement between the user and
# Anuta Networks, Inc., and receipt or possession does not convey any rights
# to divulge, reproduce, or allow others to use this program without specific
# written authorization of Anuta Networks, Inc.
#
# Copyright (c) 2015-2016 Anuta Networks, Inc. All Rights Reserved.
#
#
#DO NOT EDIT THIS FILE ITS AUTOGENERATED ONE
#ALL THE CUSTOMIZATIONS REGARDING DATAPROCESSING SHOULD BE WRITTEN INTO service_customization.py FILE
#
"""
Tree Structure of Handled XPATH:
services
|
managed-cpe-services
|
customer
|
qos-service
|
policy-class-map-update
|
update-policy-class-map
Schema Representation:
/services/managed-cpe-services/customer/qos-service/policy-class-map-update/update-policy-class-map
"""
from servicemodel import util
from servicemodel import yang
from servicemodel import devicemgr
from cpedeployment.cpedeployment_lib import getLocalObject
from cpedeployment.cpedeployment_lib import getDeviceObject
from cpedeployment.cpedeployment_lib import getCurrentObjectConfig
from cpedeployment.cpedeployment_lib import ServiceModelContext
from cpedeployment.cpedeployment_lib import getParentObject
from cpedeployment.cpedeployment_lib import log
import service_customization
class UpdatePolicyClassMap(yang.AbstractYangServiceHandler):
_instance = None
def __init__(self):
self.delete_pre_processor = service_customization.DeletePreProcessor()
self.create_pre_processor = service_customization.CreatePreProcessor()
def create(self, id, sdata):
sdata.getSession().addYangSessionPreReserveProcessor(self.create_pre_processor)
#Fetch Local Config Object
config = getCurrentObjectConfig(id, sdata, 'update_policy_class_map')
#Fetch Service Model Context Object
smodelctx = None
#Fetch Parent Object
parentobj = None
dev = []
devbindobjs={}
inputdict = {}
# START OF FETCHING THE LEAF PARAMETERS
inputdict['name'] = config.get_field_value('name')
inputdict['policy_name'] = config.get_field_value('policy_name')
inputdict['update_profile'] = config.get_field_value('update_profile')
inputdict['apply_to_sites'] = config.get_field_value('apply_to_sites')
inputdict['apply_to_device_group'] = config.get_field_value('apply_to_device_group')
inputdict['device_group'] = config.get_field_value('device_group')
inputdict['class1'] = config.get_field_value('class')
inputdict['packet_handling'] = config.get_field_value('packet_handling')
inputdict['percentage'] = config.get_field_value('percentage')
inputdict['queue_limit'] = config.get_field_value('queue_limit')
inputdict['packets'] = config.get_field_value('packets')
inputdict['qos_group'] = config.get_field_value('qos_group')
inputdict['single_cpe_site'] = config.get_field_value('single_cpe_site')
inputdict['single_cpe_sites'] = config.get_field_value('single_cpe_sites')
if inputdict['single_cpe_sites'] is None:
inputdict['single_cpe_sites'] = '[]'
inputdict['dual_cpe_site'] = config.get_field_value('dual_cpe_site')
inputdict['dual_cpe_sites'] = config.get_field_value('dual_cpe_sites')
if inputdict['dual_cpe_sites'] is None:
inputdict['dual_cpe_sites'] = '[]'
inputdict['single_cpe_dual_wan_site'] = config.get_field_value('single_cpe_dual_wan_site')
inputdict['single_cpe_dual_wan_sites'] = config.get_field_value('single_cpe_dual_wan_sites')
if inputdict['single_cpe_dual_wan_sites'] is None:
inputdict['single_cpe_dual_wan_sites'] = '[]'
inputdict['triple_cpe_site'] = config.get_field_value('triple_cpe_site')
inputdict['triple_cpe_sites'] = config.get_field_value('triple_cpe_sites')
if inputdict.get('triple_cpe_sites') is None:
inputdict['triple_cpe_sites'] = '[]'
inputdict['dual_cpe_dual_wan_site'] = config.get_field_value('dual_cpe_dual_wan_site')
inputdict['dual_cpe_dual_wan_sites'] = config.get_field_value('dual_cpe_dual_wan_sites')
if inputdict.get('dual_cpe_dual_wan_sites') is None:
inputdict['dual_cpe_dual_wan_sites'] = '[]'
# END OF FETCHING THE LEAF PARAMETERS
inputkeydict = {}
# START OF FETCHING THE PARENT KEY LEAF PARAMETERS
inputkeydict['managed_cpe_services_customer_name'] = sdata.getRcPath().split('/')[-4].split('=')[1]
# END OF FETCHING THE PARENT KEY LEAF PARAMETERS
#Use the custom methods to process the data
service_customization.ServiceDataCustomization.process_service_create_data(smodelctx, sdata, dev, device=dev, parentobj=parentobj, inputdict=inputdict, config=config)
def update(self, id, sdata):
#Fetch Local Config Object
config = getCurrentObjectConfig(id, sdata, 'update_policy_class_map')
#Fetch Service Model Context Object
smodelctx = None
#Fetch Parent Object
parentobj = None
dev = []
#Use the custom method to process the data
service_customization.ServiceDataCustomization.process_service_update_data(smodelctx, sdata, dev=dev, parentobj=parentobj, config=config)
def delete(self, id, sdata):
sdata.getSession().addYangSessionPreReserveProcessor(self.delete_pre_processor)
#Fetch Local Config Object
config = getCurrentObjectConfig(id, sdata, 'update_policy_class_map')
#Fetch Service Model Context Object
smodelctx = None
#Fetch Parent Object
parentobj = None
dev = []
#Use the custom method to process the data
service_customization.ServiceDataCustomization.process_service_delete_data(smodelctx, sdata, dev=dev, parentobj=parentobj, config=config)
@staticmethod
def getInstance():
if(UpdatePolicyClassMap._instance == None):
UpdatePolicyClassMap._instance = UpdatePolicyClassMap()
return UpdatePolicyClassMap._instance
#def rollbackCreate(self, id, sdata):
# log('rollback: id = %s, sdata = %s' % (id, sdata))
# self.delete(id,sdata)
| [
"[email protected]"
] | |
a1d530110266afe81a9bbd327cde526441ccc73b | b79bce0cf363d2b6dd11371d378d78d48e973270 | /tests/test_custom_multi_output_classification.py | d9959a3efafdab09cb105e8eec4ea79477e7dcfa | [
"Apache-2.0"
] | permissive | CharlotteSean/Kashgari | 2d9338761b16d9804fb81ff92ce2ab1d256c80a7 | ab9970ecf6c0164416bfbbec1378c690b0f00d76 | refs/heads/master | 2022-01-22T03:52:12.284458 | 2019-07-17T03:48:04 | 2019-07-17T03:48:04 | 197,900,673 | 2 | 0 | Apache-2.0 | 2019-07-20T08:15:03 | 2019-07-20T08:15:03 | null | UTF-8 | Python | false | false | 4,917 | py | # encoding: utf-8
# author: BrikerMan
# contact: [email protected]
# blog: https://eliyar.biz
# file: test_custom_multi_output_classification.py
# time: 2019-05-22 13:36
import unittest
import numpy as np
import tensorflow as tf
import kashgari
from typing import Tuple, List, Optional, Dict, Any
from kashgari.layers import L
from kashgari.processors.classification_processor import ClassificationProcessor
from kashgari.tasks.classification.base_model import BaseClassificationModel
from kashgari.corpus import SMP2018ECDTCorpus
from tensorflow.python.keras.utils import to_categorical
train_x, train_y = SMP2018ECDTCorpus.load_data('valid')
output_1_raw = np.random.randint(3, size=len(train_x))
output_2_raw = np.random.randint(3, size=len(train_x))
output_1 = to_categorical(output_1_raw, 3)
output_2 = to_categorical(output_2_raw, 3)
print(train_x[:5])
print(output_1[:5])
print(output_2[:5])
print(len(train_x))
print(output_1.shape)
print(output_2.shape)
class MultiOutputProcessor(ClassificationProcessor):
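# Labels arrive already one-hot encoded, so y-processing only selects the requested subset and the label dict is a fixed stub.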
def process_y_dataset(self,
data: Tuple[List[List[str]], ...],
maxlens: Optional[Tuple[int, ...]] = None,
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
# Data already converted to one-hot
# Only need to get the subset
result = []
for index, dataset in enumerate(data):
if subset is not None:
target = kashgari.utils.get_list_subset(dataset, subset)
else:
target = dataset
result.append(np.array(target))
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _build_label_dict(self,
labels: List[str]):
# Data already converted to one-hot
# No need to build label dict
self.label2idx = {1: 1, 0: 0}
self.idx2label = dict([(value, key) for key, value in self.label2idx.items()])
self.dataset_info['label_count'] = len(self.label2idx)
class MultiOutputModel(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 256,
'return_sequences': False
}
}
def build_model_arc(self):
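# Shared embedding + BiLSTM trunk feeding two independent 3-way sigmoid output heads.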
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']), name='layer_bi_lstm')
layer_output_1 = L.Dense(3, activation='sigmoid', name='layer_output_1')
layer_output_2 = L.Dense(3, activation='sigmoid', name='layer_output_2')
tensor = layer_bi_lstm(embed_model.output)
output_tensor_1 = layer_output_1(tensor)
output_tensor_2 = layer_output_2(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, [output_tensor_1, output_tensor_2])
def predict(self,
x_data,
batch_size=None,
debug_info=False,
threshold=0.5):
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size)
output_1 = pred[0]
output_2 = pred[1]
output_1[output_1 >= threshold] = 1
output_1[output_1 < threshold] = 0
output_2[output_2 >= threshold] = 1
output_2[output_2 < threshold] = 0
return output_1, output_2
class TestCustomMultiOutputModel(unittest.TestCase):
def test_build_and_fit(self):
from kashgari.embeddings import BareEmbedding
processor = MultiOutputProcessor()
embedding = BareEmbedding(processor=processor)
m = MultiOutputModel(embedding=embedding)
m.build_model(train_x, (output_1, output_2))
m.fit(train_x, (output_1, output_2), epochs=2)
res = m.predict(train_x[:10])
assert len(res) == 2
assert res[0].shape == (10, 3)
def test_build_with_BERT_and_fit(self):
from kashgari.embeddings import BERTEmbedding
from tensorflow.python.keras.utils import get_file
from kashgari.macros import DATA_PATH
sample_bert_path = get_file('bert_sample_model',
"http://s3.bmio.net/kashgari/bert_sample_model.tar.bz2",
cache_dir=DATA_PATH,
untar=True)
processor = MultiOutputProcessor()
embedding = BERTEmbedding(
model_folder=sample_bert_path,
processor=processor)
m = MultiOutputModel(embedding=embedding)
m.build_model(train_x, (output_1, output_2))
m.fit(train_x, (output_1, output_2), epochs=2)
res = m.predict(train_x[:10])
assert len(res) == 2
assert res[0].shape == (10, 3) | [
"[email protected]"
] | |
b6db08130173918bab964091422606ec2957af39 | a34ec07c3464369a88e68c9006fa1115f5b61e5f | /A_Basic/String/L0_1684_Count_the_Number_of_Consistent_Strings.py | 802755735771f634df680bf789b44d5b52ac935f | [] | no_license | 824zzy/Leetcode | 9220f2fb13e03d601d2b471b5cfa0c2364dbdf41 | 93b7f4448a366a709214c271a570c3399f5fc4d3 | refs/heads/master | 2023-06-27T02:53:51.812177 | 2023-06-16T16:25:39 | 2023-06-16T16:25:39 | 69,733,624 | 14 | 3 | null | 2022-05-25T06:48:38 | 2016-10-01T10:56:07 | Python | UTF-8 | Python | false | false | 213 | py | class Solution:
def countConsistentStrings(self, allowed: str, words: List[str]) -> int:
ans = 0
for word in words:
if all([w in allowed for w in word]): ans += 1
return ans | [
"[email protected]"
] | |
9207c9cab23edfac359cbc19c3db823e8b193cb9 | 84d2efd222fa190c8b3efcad083dcf2c7ab30047 | /linRegNoSTD.py | 34a9e45c64ae6545a196ca7279c57aa4acfd4220 | [] | no_license | webclinic017/Capstone-2 | aedfc8692647f2e84114da5b2e32856d0de80586 | d476723f7893c7c5da14e24f28736a8f0ba7ff55 | refs/heads/master | 2023-01-23T06:44:36.868373 | 2020-12-03T19:44:51 | 2020-12-03T19:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn.linear_model
from alpha_vantage.timeseries import TimeSeries
api_key = '8FIYTT49ZEZT2GV5'
ts = TimeSeries(key=api_key, output_format='pandas')
data, meta_data = ts.get_daily_adjusted(symbol='SPY', outputsize = 'full')
data = data.reset_index()
data.plot(x = 'date', y = '4. close')
data['date'] = data['date'].values.astype(float)
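# Cast dates to float (nanoseconds since the epoch) so the date can serve as the single regression feature.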
X = np.c_[data['date']]
Y = np.c_[data['4. close']]
model = sklearn.linear_model.LinearRegression()
model.fit(X, Y)
date = [[1736208000000000000.0]]
print(model.predict(date))
plt.show()
#standard deviation | [
"[email protected]"
] | |
a7784cf4b12ea9bed917ce26508e4c63ce253b6c | 12e42f4f34030b90c1841ece8d4efdd28925394f | /test/functional/wallet_scriptaddress2.py | 1f6b0e35dc51989b468955669c9f87acde059877 | [
"MIT"
] | permissive | GerardoTaboada/EducaCoin | 46d8aa08dd4b3859e59b739713ced08ec0b8c510 | c7f1be5dacd0a10464775c7eeb0eb799fc66cd43 | refs/heads/master | 2020-03-31T20:01:41.768383 | 2018-10-17T21:54:13 | 2018-10-17T21:54:13 | 152,522,009 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,921 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Educacoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
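# Exercises the 'Q' multisig (P2SH) address prefix: create a 2-of-2 multisig, fund and spend from it, and check that an old-style address maps onto the new prefix.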
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = False
self.extra_args = [['-addresstype=legacy'], [], []]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
block = self.nodes[1].generate(3)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
## Let's send to the old address. We can then find it in the
## new address with the new client. So basically the old
## address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
block = self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main() | [
"[email protected]"
] | |
74096f3871ce295e10d08f00012c88bc032e9da1 | f972e22df004b419d23b4b03d3c7e42e604a2e2b | /compute/wps/tasks/ophidia.py | 830321413d5463dd764a9eed4384191c13d65a43 | [] | no_license | OphidiaBigData/esgf-compute-wps | 9ec663b1701f2336f08117a6fb0725d71adfe078 | 8dd26dde385fbe861c78e432e0954725d7bf9b18 | refs/heads/master | 2020-04-28T10:20:49.718253 | 2019-02-04T09:46:43 | 2019-02-04T09:46:43 | 175,198,536 | 0 | 0 | null | 2019-03-12T11:39:20 | 2019-03-12T11:39:19 | null | UTF-8 | Python | false | false | 5,490 | py | import json
import os
import uuid
import cwt
from celery.utils.log import get_task_logger
from django.conf import settings
from PyOphidia import client
from wps import WPSError
from wps.tasks import base
from wps.tasks import process  # assumed module path for the Process helper used below
__ALL__ = [
'PROCESSES',
'oph_submit',
]
logger = get_task_logger('wps.tasks.ophidia')
PROCESSES = {
'Oph.max': 'max',
'Oph.min': 'min',
'Oph.avg': 'avg',
'Oph.sum': 'sum',
'Oph.std': 'std',
'Oph.var': 'var',
}
class OphidiaTask(object):
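# One node of an Ophidia workflow: a named operator plus its key=value arguments and task dependencies.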
def __init__(self, name, operator, on_error=None):
self.name = name
self.operator = operator
self.on_error = on_error
self.arguments = []
self.dependencies = []
def add_arguments(self, **kwargs):
self.arguments.extend(['{}={}'.format(key, value) for key, value in kwargs.iteritems()])
def add_dependencies(self, *args):
self.dependencies.extend(dict(task=x.name) for x in args)
def to_dict(self):
data = {
'name': self.name,
'operator': self.operator,
'arguments': self.arguments,
}
if self.on_error:
data['on_error'] = self.on_error
if self.dependencies:
data['dependencies'] = self.dependencies
return data
class OphidiaWorkflow(object):
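# Thin builder over the PyOphidia client: accumulates tasks, serializes them to Ophidia's JSON workflow format, and submits synchronously.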
def __init__(self, oph_client):
self.oph_client = oph_client
self.workflow = {
'name': 'ESGF WPS Workflow',
'author': 'ESGF WPS',
'abstract': 'Auto-generated abstract',
'exec_mode': 'sync',
'cwd': '/',
'ncores': '2',
'tasks': []
}
def add_tasks(self, *args):
self.workflow['tasks'].extend(args)
def check_error(self):
if self.oph_client.last_error is not None and self.oph_client.last_error != '':
error = '{}\n'.format(self.oph_client.last_error)
res = self.oph_client.deserialize_response()
try:
for x in res['response'][2]['objcontent']:
for y in x['rowvalues']:
error += '\t{}: {}\n'.format(y[-3], y[-1])
except IndexError:
raise WPSError('Failed to parse last error from Ophidia')
raise WPSError(error)
def submit(self):
self.check_error()
self.oph_client.wsubmit(self.to_json())
def to_json(self):
def default(o):
if isinstance(o, OphidiaTask):
return o.to_dict()
return json.dumps(self.workflow, default=default, indent=4)
@base.cwt_shared_task()
def oph_submit(self, parent_variables, variables, domains, operation, user_id, job_id):
self.PUBLISH = base.ALL
proc = process.Process(self.request.id)
proc.initialize(user_id, job_id)
v, d, o = self.load(parent_variables, variables, domains, operation)
oph_client = client.Client(settings.WPS_OPHIDIA_USER, settings.WPS_OPHIDIA_PASSWORD, settings.WPS_OPHIDIA_HOST, settings.WPS_OPHIDIA_PORT)
workflow = OphidiaWorkflow(oph_client)
workflow.check_error()
cores = o.get_parameter('cores')
if cores is None:
cores = settings.WPS_OPHIDIA_DEFAULT_CORES
else:
cores = cores.values[0]
axes = o.get_parameter('axes')
if axes is not None:
axes = axes.values[0]
else:
axes = 'time'
proc.log('Connected to Ophidia backend, building workflow')
container_task = OphidiaTask('create container', 'oph_createcontainer', on_error='skip')
container_task.add_arguments(container='work')
proc.log('Add container task')
# only take the first input
inp = o.inputs[0]
import_task = OphidiaTask('import data', 'oph_importnc')
import_task.add_arguments(container='work', measure=inp.var_name, src_path=inp.uri, ncores=cores, imp_dim=axes)
import_task.add_dependencies(container_task)
proc.log('Added import task')
try:
operator = PROCESSES[o.identifier]
except KeyError:
raise WPSError('Process "{name}" does not exist for Ophidia backend', name=o.identifier)
if axes == 'time':
reduce_task = OphidiaTask('reduce data', 'oph_reduce')
reduce_task.add_arguments(operation=operator, ncores=cores)
reduce_task.add_dependencies(import_task)
proc.log('Added reduction task over implicit axis')
else:
reduce_task = OphidiaTask('reduce data', 'oph_reduce2')
reduce_task.add_arguments(operation=operator, dim=axes, ncores=cores)
reduce_task.add_dependencies(import_task)
proc.log('Added reduction task over axes "{}"', axes)
output_name = '{}'.format(uuid.uuid4())
export_task = OphidiaTask('export data', 'oph_exportnc2')
export_task.add_arguments(output_path=settings.WPS_OPHIDIA_OUTPUT_PATH, output_name=output_name, ncores=cores, force='yes')
export_task.add_dependencies(reduce_task)
proc.log('Added export task')
workflow.add_tasks(container_task, import_task, reduce_task, export_task)
proc.log('Added tasks to workflow')
workflow.submit()
proc.log('Submitted workflow to Ophidia backend')
workflow.check_error()
proc.log('No errors reported by Ophidia')
output_url = settings.WPS_OPHIDIA_OUTPUT_URL.format(output_path=settings.WPS_OPHIDIA_OUTPUT_PATH, output_name=output_name)
output_var = cwt.Variable(output_url, inp.var_name, name=o.name)
return {o.name: output_var.parameterize()}
| [
"[email protected]"
] | |
1d299fc35a1f1aa5feca93086cb650a6d0e1c2f3 | 8842d6c864f12dc8853d22b8a986b01acdf0e446 | /27_12_15_Nico3/LDA.pyx | c15e5a4eef6680c7665544e3191ce137506966f6 | [] | no_license | yukkyo/ResearchSource | 0d701aa09d3cfc5aae80a022445ecf14c42f0a07 | db497d19aae41ea57d7d6dd245714a477a7a1d4c | refs/heads/master | 2021-01-18T20:01:20.427148 | 2019-06-20T05:17:54 | 2019-06-20T05:17:54 | 24,621,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | pyx | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# cython: profile=True, boundscheck=False, wraparound=False
from __future__ import division
cimport cython
from libc.stdlib cimport rand, RAND_MAX
from libcpp.vector cimport vector
from libc.math cimport log, exp
from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free
# Latent Dirichlet Allocation + collapsed Gibbs sampling
# Apply LDA (collapsed Gibbs sampling) to the full document set (about 500k documents)
# Returns the vocabulary-topic distribution (the topic-vocabulary count matrix with beta subtracted from each entry, then transposed) and the per-iteration perplexities
class LDA:
@cython.cdivision(True)
def __init__(self, r_n_topics, r_alpha, r_beta, raw_docs, r_V, r_iteration):
print "init lda instance"
self.n_topics = r_n_topics
self.alpha = r_alpha # parameter of topics prior
self.beta = r_beta # parameter of words prior
self.V = r_V # size of vocabulary
self.perps = []
self.iteration = r_iteration
print "initalize topics"
cdef vector[vector[int]] docs = raw_docs
# self.docs = docs
cdef int n_corpus, len_doc, m, n, new_z, v
n_corpus = 0
cdef int n_topics_int = self.n_topics
cdef int V_int = self.V
cdef double n_topics = self.n_topics
cdef double alpha = self.alpha
cdef double beta = self.beta
cdef double V = self.V
cdef double Vbeta = V * beta
n_topics_s = self.n_topics
v2 = self.V
# number of times topic z and word w co-occur
cdef int max_docs = 1
max_docs = docs.size()
# word count of each document and topic
cdef vector[vector[double]] n_m_z
n_m_z = vector[vector[double]](max_docs, vector[double](n_topics_int, alpha))
# word count of each topic and vocabulary
cdef vector[vector[double]] n_z_t
# n_z_t = vector[vector[double]](n_topics_int, vector[double](<int>V, beta))
n_z_t = vector[vector[double]](V_int, vector[double](n_topics_int, beta))
# word count of each topic
cdef vector[double] n_z
n_z = vector[double](n_topics_int, Vbeta)
cdef vector[vector[int]] z_m_n
cdef vector[int] z_n
for m in xrange(max_docs):
len_doc = docs[m].size()
n_corpus += len_doc
z_n.clear()
for n in xrange(len_doc):
v = docs[m][n]
new_z = int((rand()/(RAND_MAX +1.)) * n_topics)
z_n.push_back(new_z)
n_m_z[m][new_z] += 1.
n_z_t[v][new_z] += 1.
n_z[new_z] += 1
z_m_n.push_back(z_n)
print "end initialize topics"
"""learning once iteration"""
print "inference start"
cdef int j, ite, iteration
iteration = self.iteration
cdef vector[vector[double]] n_z_t_tmp
cdef vector[double] n_m_z_m
n_m_z_m.resize(n_topics_int)
cdef vector[int] z_m_n_m
cdef vector[double] p_z2
p_z2.resize(n_topics_int)
cdef double p_z2j, u, perp
# cdef long V = self.V
cdef vector[int] docs_m
cdef double n_z_j
cdef vector[double] theta
cdef double Kalpha = <double>n_topics * alpha
cdef double log_per, tmp_logper, len_doc_kalpha
print "calc first perp"
n_z_t_tmp = n_z_t
log_per = 0.0
for v in xrange(V_int):
for j in xrange(n_topics_int):
n_z_t_tmp[v][j] /= n_z[j]
for m in xrange(max_docs):
len_doc = docs[m].size()
len_doc_kalpha = <double>len_doc + Kalpha
theta = n_m_z[m]
docs_m = docs[m]
for j in xrange(n_topics_int):
theta[j] = theta[j] / len_doc_kalpha
for n in xrange(len_doc):
v = docs_m[n]
tmp_logper = 0.0
for j in xrange(n_topics_int):
tmp_logper += (theta[j] * n_z_t_tmp[v][j])
log_per -= log(tmp_logper)
theta.clear()
n_z_t_tmp.clear()
log_per /= <double>n_corpus
perp = exp(log_per)
print "perp: " + str(perp)
self.perps.append(perp)
for ite in xrange(iteration):
print "ite: " + str(ite)
# sampling each word in corpus
for m in xrange(max_docs):
len_doc = docs[m].size()
n_m_z_m = n_m_z[m]
z_m_n_m = z_m_n[m]
for n in xrange(len_doc):
v = docs[m][n]
# discount for n-th word n with topic z
z = z_m_n_m[n]
n_m_z_m[z] -= 1
n_z_t[v][z] -= 1
n_z[z] -= 1
# sampling new_z
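# p(z=j | everything else) is proportional to n_z_t[v][j] * n_m_z[m][j] / n_z[j]; accumulate the unnormalized CDF and invert a uniform draw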
for j in xrange(n_topics_int):
p_z2j = n_z_t[v][j] * n_m_z_m[j]
p_z2j /= n_z[j]
if j != 0:
p_z2j += p_z2[j-1]
p_z2[j] = p_z2j
u = (rand()/(RAND_MAX +1.))
u *= p_z2[n_topics_int - 1]
new_z = n_topics_int - 1
for j in xrange(n_topics_int):
if u < p_z2[j]:
new_z = j
break
# set z the new topic and increment counters
z_m_n_m[n] = new_z
n_m_z_m[new_z] += 1
n_z_t[v][new_z] += 1
n_z[new_z] += 1
z_m_n[m] = z_m_n_m
n_m_z[m] = n_m_z_m
if (m + 1) % 100000 == 0:
print "end docs: " + str(m + 1)
print "calc perp"
log_per = 0.0
n_z_t_tmp = n_z_t
for v in xrange(V_int):
for j in xrange(n_topics_int):
n_z_t_tmp[v][j] /= n_z[j]
for m in xrange(max_docs):
len_doc = docs[m].size()
len_doc_kalpha = <double>len_doc + Kalpha
theta = n_m_z[m]
docs_m = docs[m]
for j in xrange(n_topics_int):
theta[j] = theta[j] / len_doc_kalpha
for n in xrange(len_doc):
v = docs_m[n]
tmp_logper = 0.0
for j in xrange(n_topics_int):
tmp_logper += (theta[j] * n_z_t_tmp[v][j])
log_per -= log(tmp_logper)
theta.clear()
n_z_t_tmp.clear()
log_per /= <double>n_corpus
perp = exp(log_per)
print "perp: " + str(perp)
self.perps.append(perp)
print "calc new alpha and beta"
self.n_z_t = n_z_t
self.z_m_n = z_m_n
return | [
"[email protected]"
] | |
9fc5b6e12ba33400052ec9e08c251ff1626f1477 | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /testing/test_programs/numpy/basic_numpy/arrays/stypy_test_files/numpy_array_broadcasting_4__type_data.py | 99ec5d09a07932264cc57dd68828680219e497e5 | [] | no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from testing.code_generation_testing.codegen_testing_common import instance_of_class_name
test_types = {
'__main__': {
'r4': instance_of_class_name("ndarray"),
'r5': instance_of_class_name("ndarray"),
'__name__': instance_of_class_name("str"),
'r2': instance_of_class_name("ndarray"),
'r3': instance_of_class_name("ndarray"),
'__builtins__': instance_of_class_name("module"),
'__file__': instance_of_class_name("str"),
'__package__': instance_of_class_name("NoneType"),
'r': instance_of_class_name("ndarray"),
'w': instance_of_class_name("ndarray"),
'v': instance_of_class_name("ndarray"),
'np': instance_of_class_name("module"),
'x': instance_of_class_name("ndarray"),
'__doc__': instance_of_class_name("NoneType"),
},
}
| [
"[email protected]"
] | |
7c58362f81d2eebf86e77c4f52201dabd123be2d | e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1 | /AlgorithmCodeTemplates/algorithm/sliding_window_examples.py | 634c355c9efa3418e81eeafb9f04d218da1225cd | [] | no_license | sevenhe716/LeetCode | 41d2ef18f5cb317858c9b69d00bcccb743cbdf48 | 4a1747b6497305f3821612d9c358a6795b1690da | refs/heads/master | 2020-03-16T16:12:27.461172 | 2019-04-22T13:27:54 | 2019-04-22T13:27:54 | 130,221,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,002 | py | from collections import Counter
from collections import defaultdict
# [3] https://leetcode.com/problems/longest-substring-without-repeating-characters/
# variation with no pattern
def lengthOfLongestSubstring(s):
# create a default dict to maintain state
counter = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(s):
counter[s[end]] += 1
if counter[s[end]] > 1:
count += 1
end += 1
while count > 0:
counter[s[start]] -= 1
if counter[s[start]] > 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [76] https://leetcode.com/problems/minimum-window-substring/
# variation with finding minimum
def minWindow(s: str, t: str) -> str:
counter = Counter(t)
count, start, end, res = len(t), 0, 0, [float('inf'), 0]
while end < len(s):
counter[s[end]] -= 1
# consider duplicate char in t
if counter[s[end]] >= 0:
count -= 1
end += 1
# valid in while
while count == 0:
# update minimum here, inner while loop
if end - start < res[0]:
res = (end - start, start)
counter[s[start]] += 1
if counter[s[start]] > 0:
count += 1
start += 1
return s[res[1]:res[0] + res[1]] if res[0] != float('inf') else ''
# [904] https://leetcode.com/problems/fruit-into-baskets/
# variation with list
def totalFruit(tree: 'List[int]') -> int:
cnt = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(tree):
cnt[tree[end]] += 1
if cnt[tree[end]] == 1:
count += 1
end += 1
while count > 2:
cnt[tree[start]] -= 1
if cnt[tree[start]] == 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [438] https://leetcode.com/problems/find-all-anagrams-in-a-string/
# variation with restrict between start and end
def findAnagrams(s: str, p: str) -> 'List[int]':
len_p, len_s = len(p), len(s)
if len_p > len_s:
return []
counter = Counter(p)
count, start, end, res = len_p, 0, 0, []
while end < len_s:
# only update counter when match char in p
counter[s[end]] -= 1
if counter[s[end]] >= 0:
count -= 1
end += 1
if count == 0:
res.append(start)
# not use a while, because restrict the length
if end - start == len_p:
counter[s[start]] += 1
# exclude char not in p, because always negative
if counter[s[start]] > 0:
count += 1
start += 1
return res
# [30] https://leetcode.com/problems/substring-with-concatenation-of-all-words/
# variation with complex match policy
def findSubstring(s: str, words: 'List[str]') -> 'List[int]':
if not words:
return []
word_len, res = len(words[0]), []
# start offset from 0 to word_len, and step is word_len
for i in range(word_len):
# reset state every epoch
counter = Counter(words)
start, end, count = i, i, len(words)
while end < len(s):
cur_word = s[end:end + word_len]
# check is not necessary here, just for performance
if cur_word in counter:
counter[cur_word] -= 1
if counter[cur_word] >= 0:
count -= 1
end += word_len
if count == 0:
res.append(start)
# ensure consecutive words
if end - start == word_len * len(words):
cur_word = s[start:start + word_len]
if cur_word in counter:
counter[cur_word] += 1
if counter[cur_word] > 0:
count += 1
start += word_len
# the order is not necessary here
return res | [
"[email protected]"
] | |
f52fb6152bba23a4c0a005ca2f666c5e95d07473 | 6d80ce7a1f44ddf5741fd190ddfe0d9be8e5f162 | /data/lmdbMaker.py | 0081b73ccebdd68d83571914b0342cb8bcb9817a | [
"MIT"
] | permissive | dun933/FudanOCR | dd8830ca4b8ebb08acd31326fcf5aa3c961886a0 | fd79b679044ea23fd9eb30691453ed0805d2e98b | refs/heads/master | 2021-04-03T19:50:47.646099 | 2020-03-16T08:43:59 | 2020-03-16T08:43:59 | 248,391,401 | 1 | 0 | MIT | 2020-03-19T02:23:11 | 2020-03-19T02:23:10 | null | UTF-8 | Python | false | false | 6,516 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Please execute the code with python2
'''
import os
import lmdb
import cv2
import numpy as np
def checkImageIsValid(imageBin):
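# Decode the raw bytes with OpenCV; the image counts as valid only if it decodes to a non-empty array.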
if imageBin is None:
return False
try:
imageBuf = np.fromstring(imageBin, dtype=np.uint8)
img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
imgH, imgW = img.shape[0], img.shape[1]
if imgH * imgW == 0:
return False
except:
print("Image is invalid!")
return True
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, v in cache.items():
txn.put(k, v)
def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
"""
Create LMDB dataset for CRNN training.
ARGS:
outputPath : LMDB output path
imagePathList : list of image path
labelList : list of corresponding groundtruth texts
lexiconList : (optional) list of lexicon lists
checkValid : if true, check the validity of every image
"""
assert (len(imagePathList) == len(labelList))
nSamples = len(imagePathList)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
for i in range(nSamples):
imagePath = imagePathList[i]
label = labelList[i]
if not os.path.exists(imagePath):
print('%s does not exist' % imagePath)
continue
import codecs
with open(imagePath, 'rb') as f:
imageBin = f.read()
if checkValid:
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % imagePath)
continue
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
cache[imageKey] = imageBin
cache[labelKey] = label
if lexiconList:
lexiconKey = 'lexicon-%09d' % cnt
cache[lexiconKey] = ' '.join(lexiconList[i])
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
def read_image_label(image_directory, label_address):
import os
image_lis = os.listdir(image_directory)
f = open(label_address)
label_map = {}  # renamed from `dict` to avoid shadowing the builtin
i = 1
# maps image path -> target label record
for line in f.readlines():
# TODO
label_map[line[10:].split(" ")[0]] = line.split(' ')[1].replace('\n', '').replace('\r',
'') # arttrain-11.art/lsvttest10.lsvt12
'''
print(label_map)
i+=1
if i==14:
break
print(label_map)
'''
# print(label_map)
result1 = []
result2 = []
# TODO
for image_path1 in image_lis:
for image_path2 in os.listdir(image_directory + '/' + image_path1):
try:
# image_path = image_path.replace('.jpg','')
# result1.append(image_directory+'/'+image_path1+'/'+image_path2)
result2.append(label_map[image_path1 + '/' + image_path2])
result1.append(image_directory + '/' + image_path1 + '/' + image_path2)
except:
# pass
print("jianzhi")
return result1, result2
def extract_result_from_xml():
import re
f = open('../xml_test/word.xml', 'r')
string = ""
for line in f.readlines():
print(line)
string += line
print(string)
# collect the file paths
result1 = re.findall(r'file=\"(.*?)\"', string)
for i in range(len(result1)):
result1[i] = '/home/chenjingye/datasets/ICDAR2003/WordR/TrialTest/' + result1[i]
print(result1)
result2 = re.findall(r'tag=\"(.*?)\"', string)
print(result2)
return result1, result2
def ic15():
f = open('/home/chenjingye/datasets/ICDAR2015/Word_recognition/Challenge4_Test_Task3_GT.txt', 'r')
result1 = []
result2 = []
for line in f.readlines():
# print(line)
# print(line.split())
a, b = line.split(', ')
print(a, b)
result1.append(
'/home/chenjingye/datasets/ICDAR2015/Word_recognition/ch4_test_word_images_gt/' + a.replace(',', ''))
result2.append(b.replace("\"", "").replace('\r\n', ''))
print(result1)
print(result2)
return result1, result2
def find_jpg():
import os
root = "/mnt/sdb1/zifuzu/chenjingye/datasets/mnt/ramdisk/max/90kDICT32px"
flag = True
def findjpg(path, ret):
"""Finding the *.txt file in specify path"""
filelist = os.listdir(path)
for filename in filelist:
# if len(ret) > 500000 :
# return
de_path = os.path.join(path, filename)
if os.path.isfile(de_path):
if de_path.endswith(".jpg"): # Specify to find the txt file.
print(de_path)
ret.append(de_path)
# if len(ret) > 500000:
# return
else:
findjpg(de_path, ret)
ret = []
findjpg(root, ret)
for path in ret:
print(path)
try:
os.remove('./temp.txt')
except:
pass
f = open('./temp.txt', 'a')
for element in ret:
f.write(element + '\n')
f.close()
def syn90():
import re
f = open('./temp.txt', 'r')
result1 = []
result2 = []
for line in f.readlines():
result1.append(line.replace('\n', ''))
target = re.findall(r'_(.*?)_', line)[0]
result2.append(target)
return result1, result2
if __name__ == '__main__':
    '''
    Pass two lists into the createDataset function:
    list1: list of image paths
    list2: list of image labels
    Entries at the same position in the two lists belong together.
    '''
imgList, labelList = ic15()
print(imgList)
print(labelList)
print("The length of the list is ", len(imgList))
'''Input the address you want to generate the lmdb file.'''
createDataset('/mnt/sdb1/zifuzu/chenjingye/datasets/syn90_train_500000data_lmdb', imgList, labelList)
| [
"[email protected]"
] | |
25ea3ccf9694bbff46ace1ccdf8a44257540ba69 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/myfavouritekk_vdetlib/vdetlib-master/utils/log.py | 331282eee61fb52e30ff5a83431ec74d430c69e0 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 175 | py | #!/usr/bin/env python
import logging
logging.basicConfig(
format='[%(asctime)s %(process)d %(filename)s:%(lineno)s %(levelname)s] %(message)s',
level=logging.DEBUG)
| [
"[email protected]"
] | |
e13fab0514aa87a22f4efac43760c2d877c23adb | 64a99161204051f6f2abb9e8d88a5508952c0115 | /examples/saveLoadV1/create_save.py | 61f2ff9f7627dd79c32b0c968455c4711de7a2ad | [
"MIT"
] | permissive | suny-downstate-medical-center/netpyne | d1ba5a258ba63c8ad8b0fa91a6d8bbd99f2e8d28 | 9d08867205b776bbb467554c49df9d8aba57dcf2 | refs/heads/development | 2023-08-23T22:48:26.020812 | 2023-08-16T14:20:23 | 2023-08-16T14:20:23 | 48,733,333 | 18 | 18 | MIT | 2023-09-11T16:01:19 | 2015-12-29T07:12:08 | Jupyter Notebook | UTF-8 | Python | false | false | 164 | py | from netpyne import sim
import params
# Create network and save
sim.create(netParams=params.netParams, simConfig=params.simConfig)
sim.gatherData()
sim.saveData()
| [
"[email protected]"
] | |
46c205a3f435959086389638a9fd7fefd957308c | 99fa82f29a5b50a5595985acc460a0afaa6099a8 | /app/shopdj/sale/migrations/0004_invoice_total.py | a4f08cde3a2ed9f2afa42d4898d917d64e08dcca | [] | no_license | nnocturnnn/university_rep | a47cce9e29f96e9cc33293c76321e298e7628a4d | 4a8cd42f53dd112640a37ad5ff815ecf09ce1c25 | refs/heads/master | 2023-04-20T09:44:24.144760 | 2021-05-11T16:16:07 | 2021-05-11T16:16:07 | 304,661,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # Generated by Django 3.0.5 on 2020-07-02 05:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sale', '0003_auto_20200701_0535'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='total',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
080121153af9a45d9084cd5f5233cdfb821defe7 | 23af1e2b1f29be62926ed6a8e39b4462f07f5f2b | /atcoder.jp/abc086/abc086_b/Main.py | 2caffef31e6e42003299d780de9ca6f6f538b840 | [] | no_license | IKDrocket/Atcoder | 8ef382577a377a8f35890b24a49f681f00f2f047 | fc19379de2ddf62a61b67eda33bf8aa29d503685 | refs/heads/main | 2023-02-09T11:58:00.353304 | 2021-01-02T12:06:20 | 2021-01-02T12:06:20 | 318,876,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | a,b = input().split()
num = int(a+b)
for i in range(1,10101):
ans = i*i
if num == ans:
print("Yes")
break
else:
print("No")
| [
"[email protected]"
] | |
c31db9e2643724ed66331b721d6a77560de6209a | 06167f625464c898ac95e752694a5931b9a55a55 | /src/admission/migrations/0001_initial.py | bacece5228ade3f6e66d8337c4fae54aa72fdb6d | [] | no_license | nazmul629/school_management_system | 16e2003b652b14174d6f59b4682ca366275f3207 | d0ff759645d9ba8f88d2aa63dbc867e7713455ed | refs/heads/master | 2021-06-19T18:06:56.539454 | 2019-04-20T12:35:24 | 2019-04-20T12:35:24 | 182,307,917 | 1 | 0 | null | 2021-03-20T08:15:23 | 2019-04-19T18:22:11 | CSS | UTF-8 | Python | false | false | 1,699 | py | # Generated by Django 2.0 on 2019-04-19 16:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='class_section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('section', models.CharField(max_length=10, unique=True)),
],
),
migrations.CreateModel(
name='Schoolallclass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Class', models.IntegerField(unique=True)),
],
),
migrations.CreateModel(
name='StudentInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Student_name', models.CharField(max_length=50)),
('age', models.IntegerField()),
('gender', models.CharField(choices=[('m', 'Male'), ('f', 'Female')], max_length=10)),
('roll', models.IntegerField(unique=True)),
('fathers_name', models.CharField(max_length=50)),
('mothers_name', models.CharField(max_length=50)),
('address', models.TextField()),
('mobile', models.CharField(max_length=16)),
('Class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='admission.Schoolallclass')),
],
),
]
| [
"[email protected]"
] | |
6e7a8849b45d4e7ef435085fefc41204dd11f94a | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_Maxout_0.py | 34ed3c5b0baf796738184d4faee74db735487de9 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 608 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Maxout_0():
"""test Maxout_0"""
jit_case = JitTrans(case=yml.get_case_info("Maxout_0"))
jit_case.jit_run()
| [
"[email protected]"
] | |
0b923417f2c83d1b943f897a0e067b827cc724c3 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/6d0b0f8338f7ffbc761ddc05cbdc620a99901074-<format_item>-fix.py | a42ce4e3bcf3cd32eb44b6e67fee46a95e4f787a | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | def format_item(self, item):
d = item.as_dict()
containers = d['containers']
ports = d['ip_address']['ports']
resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
for port_index in range(len(ports)):
ports[port_index] = ports[port_index]['port']
for container_index in range(len(containers)):
old_container = containers[container_index]
new_container = {
'name': old_container['name'],
'image': old_container['image'],
'memory': old_container['resources']['requests']['memory_in_gb'],
'cpu': old_container['resources']['requests']['cpu'],
'ports': [],
}
for port_index in range(len(old_container['ports'])):
new_container['ports'].append(old_container['ports'][port_index]['port'])
containers[container_index] = new_container
d = {
'id': d['id'],
'resource_group': resource_group,
'name': d['name'],
'os_type': d['os_type'],
'ip_address': ('public' if (d['ip_address']['type'] == 'Public') else 'none'),
'ports': ports,
'location': d['location'],
'containers': containers,
'tags': d.get('tags', None),
}
return d | [
"[email protected]"
] | |
96fdbd1d69014c062a573ce6737c753189550b8e | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-CoreServices/PyObjCTest/test_textutils.py | 2ff838ec467acf264133c95ae598c609539c4881 | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | from PyObjCTools.TestSupport import *
import CoreServices
class TestTextUtils (TestCase):
def assert_not_wrapped(self, name):
self.assertTrue(not hasattr(CoreServices, name), "%r exposed in bindings"%(name,))
def test_not_wrapped(self):
self.assert_not_wrapped('ScriptRunStatus')
self.assert_not_wrapped('BreakTable')
self.assert_not_wrapped('NBreakTable')
self.assert_not_wrapped('Munger')
self.assert_not_wrapped('NewString')
self.assert_not_wrapped('SetString')
self.assert_not_wrapped('GetString')
self.assert_not_wrapped('GetIndString')
self.assert_not_wrapped('FindWordBreaks')
self.assert_not_wrapped('LowercaseText')
self.assert_not_wrapped('UppercaseText')
self.assert_not_wrapped('StripDiacritics')
self.assert_not_wrapped('UppercaseStripDiacritics')
self.assert_not_wrapped('FindScriptRun')
self.assert_not_wrapped('UpperString')
self.assert_not_wrapped('upperstring')
self.assert_not_wrapped('UprString')
self.assert_not_wrapped('c2pstrcpy')
self.assert_not_wrapped('p2cstrcpy')
self.assert_not_wrapped('CopyPascalStringToC')
self.assert_not_wrapped('CopyCStringToPascal')
self.assert_not_wrapped('c2pstr')
self.assert_not_wrapped('C2PStr')
self.assert_not_wrapped('p2cst')
self.assert_not_wrapped('P2CStr')
self.assert_not_wrapped('p2cstr')
self.assert_not_wrapped('c2pstr')
self.assert_not_wrapped('C2PStr')
self.assert_not_wrapped('P2CStr')
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
fbeecaa4293179be24399fb4bb5c7eee64229141 | 50a8c057fd6d8cd0ec96ca9b79c9328432335650 | /ubisqsh.py | e945ad61a8de0c5092e765f01c13e8f9f6c84a5b | [
"MIT"
] | permissive | KurSh/qc_modem_tools | ee804b566f83e30dde13e4aaf2f55e1a95c74fda | fce2f00e226f0fce82f064d218bf6adb70ea8647 | refs/heads/master | 2023-07-07T19:16:43.556182 | 2020-12-25T20:25:52 | 2020-12-25T20:25:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | #!/usr/bin/env python3
from struct import unpack
import os,sys
def parse_ubihdr(rf):
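    # "UBI#" marks the UBI erase-counter header; the fields below sit at fixed offsets.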
curpos=rf.tell()
magic = rf.read(4)
if magic == b"UBI#":
rf.seek(curpos+0x10)
hdrsize = unpack(">I", rf.read(4))[0]
blksize = unpack(">I", rf.read(4))[0]
data = unpack(">I", rf.read(4))[0]
rf.seek(curpos+0x3C)
crc = unpack(">I", rf.read(4))[0]
rf.seek(curpos)
return [hdrsize,blksize,data,crc]
def parse_ubihdr2(rf):
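    # "UBI!" marks the UBI volume-ID header; read its flags, block number and CRC.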
curpos=rf.tell()
magic = rf.read(4)
if magic == b"UBI!":
flag = unpack("<I", rf.read(4))[0]
rf.seek(curpos+0xC)
blk = unpack(">I", rf.read(4))[0]
rf.seek(curpos + 0x3C)
crc = unpack(">I", rf.read(4))[0]
rf.seek(curpos)
return [flag,blk,crc]
def main():
if len(sys.argv)<2:
print("Usage: ubisqsh.py <filename>")
sys.exit()
filename=sys.argv[1]
with open(filename,'rb') as rf:
with open(filename+".out","wb") as wf:
pos=0
while pos<os.stat(filename).st_size:
hdrsize,blksize,data,crc=parse_ubihdr(rf)
rf.seek(pos+hdrsize)
flag,blk,crc=parse_ubihdr2(rf)
                if flag & 0xF000000 == 0:
                    print("Blk %d Flag %x WR" % (blk, flag))
rf.seek(pos + blksize)
rdata=rf.read(0x40000-blksize)
wf.write(rdata)
else:
print(f"Blk %d Flag %x SK" %(blk,flag))
rf.seek(pos+0x40000)
                pos+=0x40000
print("Done.")
if __name__=="__main__":
main() | [
"[email protected]"
] | |
781782dc9fc9bab7ca93ae38f17db36d6e004b67 | bae5f696b76af428fb5555c147c4f1bcff1bb62e | /metalearn/examples/evaluate_test_data_envs.py | 1f25c6c717085714ed0519ac4b1425fe888f373f | [
"MIT"
] | permissive | cosmicBboy/ml-research | 1e309f881f9810e7a82a262d625db5d684752705 | 04fd31f68e7a44152caf6eaaf66ab59f136dd8f5 | refs/heads/master | 2021-01-24T09:58:25.662826 | 2020-08-10T22:08:23 | 2020-08-10T22:08:23 | 123,030,133 | 8 | 4 | MIT | 2019-06-29T20:13:37 | 2018-02-26T21:03:02 | Jupyter Notebook | UTF-8 | Python | false | false | 2,129 | py | """Evaluate controller after training."""
import joblib
import pandas as pd
import os
import torch
from pathlib import Path
from metalearn.metalearn_controller import MetaLearnController
from metalearn.inference.inference_engine import CASHInference
from metalearn.task_environment import TaskEnvironment
from metalearn.data_environments import openml_api, sklearn_classification
build_path = Path(os.path.dirname(__file__)) / ".." / "floyd_outputs" / "225"
controller = MetaLearnController.load(build_path / "controller_trial_0.pt")
experiment_results = pd.read_csv(
build_path / "rnn_metalearn_controller_experiment.csv")
base_mlf_path = build_path / "metalearn_controller_mlfs_trial_0"
# get top 10 best mlfs for each data env across all episodes.
best_mlf_episodes = (
experiment_results
.groupby("data_env_names")
.apply(lambda df: (
df.sort_values("best_validation_scores", ascending=False).head(10)))
["episode"]
.reset_index(level=1, drop=True)
)
# a dict mapping datasets to the top 10 mlfs found for those datasets.
best_mlfs = (
best_mlf_episodes.map(
lambda x: joblib.load(base_mlf_path / ("best_mlf_episode_%d.pkl" % x)))
.groupby("data_env_names")
.apply(lambda x: list(x))
.to_dict()
)
sklearn_data_envs = sklearn_classification.envs()
openml_data_envs = openml_api.classification_envs()
torch.manual_seed(10)
task_env = TaskEnvironment(
env_sources=["OPEN_ML", "SKLEARN"],
test_set_config={"OPEN_ML": {"test_size": 0.8, "random_state": 100}},
random_state=100,
enforce_limits=True,
per_framework_time_limit=720,
per_framework_memory_limit=10000,
dataset_names=list(sklearn_data_envs.keys()),
test_dataset_names=list(openml_data_envs.keys()),
error_reward=0,
target_types=["BINARY", "MULTICLASS"])
inference_engine = CASHInference(controller, task_env)
# evaluate controller on test data environments
train_env_results = inference_engine.evaluate_training_data_envs(
n=1, datasets=sklearn_data_envs.keys(), verbose=True)
test_env_results = inference_engine.evaluate_test_data_envs(n=50, verbose=True)
| [
"[email protected]"
] | |
f8e584f21699ce5bf51c3992ef099f5f3548d4d1 | 52fb627ec952bf647c625f9372581bff4764da76 | /wo_websocket.py | 71f69201f610ea526be8c98ac46edded4b559f1b | [] | no_license | syyunn/smpc-dl | b89071d277347e28979973e734b329f51020a6b0 | 41bd40ef7866062a53fb20bcff994c51f38f38d5 | refs/heads/master | 2020-08-06T00:17:01.474179 | 2019-10-05T16:39:14 | 2019-10-05T16:39:14 | 212,768,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,515 | py | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import syft as sy
hook = sy.TorchHook(torch)
class Arguments():
def __init__(self):
self.batch_size = 64
self.test_batch_size = 64
self.epochs = 10
self.lr = 0.02
self.seed = 1
self.log_interval = 1 # Log info at each batch
self.precision_fractional = 3
args = Arguments()
_ = torch.manual_seed(args.seed)
# simulation functions
def connect_to_workers(n_workers):
return [
sy.VirtualWorker(hook, id=f"worker{i+1}")
for i in range(n_workers)
]
def connect_to_crypto_provider():
return sy.VirtualWorker(hook, id="crypto_provider")
workers = connect_to_workers(n_workers=2)
crypto_provider = connect_to_crypto_provider()
# We don't use the whole dataset for efficiency purpose, but feel free to increase these numbers
n_train_items = 640
n_test_items = 640
def get_private_data_loaders(precision_fractional, workers, crypto_provider):
def one_hot_of(index_tensor):
"""
Transform to one hot tensor
Example:
[0, 3, 9]
=>
[[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]
"""
onehot_tensor = torch.zeros(*index_tensor.shape,
10) # 10 classes for MNIST
onehot_tensor = onehot_tensor.scatter(1, index_tensor.view(-1, 1), 1)
return onehot_tensor
def secret_share(tensor):
"""
Transform to fixed precision and secret share a tensor
"""
return (
tensor
.fix_precision(precision_fractional=precision_fractional)
.share(*workers, crypto_provider=crypto_provider,
requires_grad=True)
)
transformation = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transformation),
batch_size=args.batch_size
)
private_train_loader = [
(secret_share(data), secret_share(one_hot_of(target)))
for i, (data, target) in enumerate(train_loader)
if i < n_train_items / args.batch_size
]
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, download=True,
transform=transformation),
batch_size=args.test_batch_size
)
private_test_loader = [
(secret_share(data), secret_share(target.float()))
for i, (data, target) in enumerate(test_loader)
if i < n_test_items / args.test_batch_size
]
return private_train_loader, private_test_loader
private_train_loader, private_test_loader = get_private_data_loaders(
precision_fractional=args.precision_fractional,
workers=workers,
crypto_provider=crypto_provider)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def train(args, model, private_train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(
private_train_loader): # <-- now it is a private dataset
start_time = time.time()
optimizer.zero_grad()
output = model(data)
# loss = F.nll_loss(output, target) <-- not possible here
batch_size = output.shape[0]
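        # MSE loss on secret-shared tensors; refresh() re-randomizes the shares so
        # the fixed-precision values do not overflow during training.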
loss = ((output - target) ** 2).sum().refresh() / batch_size
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
loss = loss.get().float_precision()
print(
'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTime: {:.3f}s'.format(
epoch, batch_idx * args.batch_size,
len(private_train_loader) * args.batch_size,
100. * batch_idx / len(private_train_loader),
loss.item(), time.time() - start_time))
def test(args, model, private_test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in private_test_loader:
start_time = time.time()
output = model(data)
pred = output.argmax(dim=1)
correct += pred.eq(target.view_as(pred)).sum()
correct = correct.get().float_precision()
print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format(
correct.item(), len(private_test_loader) * args.test_batch_size,
100. * correct.item() / (len(
private_test_loader) * args.test_batch_size)))
model = Net()
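# Encode the parameters in fixed precision and secret-share them across the workers.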
model = model.fix_precision().share(*workers, crypto_provider=crypto_provider, requires_grad=True)
optimizer = optim.SGD(model.parameters(), lr=args.lr)
optimizer = optimizer.fix_precision()
for epoch in range(1, args.epochs + 1):
train(args, model, private_train_loader, optimizer, epoch)
test(args, model, private_test_loader)
| [
"[email protected]"
] | |
60738160b15b49779d9eaf9e8d83139fd7afa508 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5636311922769920_0/Python/sleepingfire/d.py | ba96e3d60f9df7e644e9b38c0dc4523c1c6882bd | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | import sys, os, math
def main(K, C, S):
minimum = math.ceil(K / C)
if minimum > S:
return "IMPOSSIBLE"
cs = [1] * (C + 1)
for i in range(1, C+1):
cs[i] = C * cs[i-1]
tiles = []
idx = 1
depth = 0
for k in range(1, math.ceil(K / C) * C + 1):
idx = (idx - 1) * K + min(k, K)
#print(k, depth, idx)
depth += 1
if depth == C:
tiles.append(idx)
idx = 1
depth = 0
return tiles
if __name__ == "__main__":
in_path = "test.in" if len(sys.argv) == 1 else sys.argv[1]
in_file = open(in_path, 'r')
T = int(in_file.readline().rstrip())
for case_idx in range(T):
K, C, S = [int(z) for z in in_file.readline().rstrip().split()]
res = main(K, C, S)
if isinstance(res, list):
print("Case #{}: {}".format(case_idx + 1, " ".join([str(z) for z in res])))
else:
print("Case #{}: {}".format(case_idx + 1, res))
| [
"[email protected]"
] | |
b0e17c91d87c7d7e5fcc3f873986d920f6918c16 | 21a561ec0d40554a43dc5a6dfab0f4f62ddb615d | /canteen/base/__init__.py | aaae304df48bbcbcb386327709ca4b1e4a9c8d98 | [
"MIT"
] | permissive | mindis/canteen | 2745a0ebec696d1fbfcc6c4c69582711a4a7e8e6 | a0cf38333417e879712394800a49eb9d0450f96f | refs/heads/master | 2020-12-25T19:15:21.612088 | 2014-02-24T11:29:59 | 2014-02-24T11:29:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # -*- coding: utf-8 -*-
'''
canteen base
~~~~~~~~~~~~
:author: Sam Gammon <[email protected]>
:copyright: (c) Keen IO, 2013
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
'''
# import all the things
from .page import *
from .logic import *
from .handler import *
__all__ = (
'page',
'logic',
'handler',
'Page',
'Logic',
'Handler'
)
| [
"[email protected]"
] | |
c79dcbd3e94c42a92504220ffb36ebae2587156d | 6d5414a710f09c8a1613e1cb60dfff2d8b37e8ad | /Biweekly Contest 40/Maximum Repeating Substring.py | 62ae08890abed3b5a9fbacc68b6a88d4b8a0ed12 | [] | no_license | prashanthr11/Leetcode | 59985b5037f70933965d509083545e58716b9ec3 | c9a034073062ea01f76448b962152ec8f9b82228 | refs/heads/master | 2023-04-15T09:19:36.526698 | 2021-04-17T16:09:55 | 2021-04-17T16:09:55 | 273,557,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | class Solution:
def maxRepeating(self, a: str, b: str) -> int:
cnt = 0
tmp = b
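        # Keep appending one more copy of the original word until it stops matching.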
while b in a:
cnt += 1
b += tmp
return cnt if cnt else 0
| [
"[email protected]"
] | |
0324f681a4d12c47fa524aa35bd3858f1955c899 | 98f730ec6a43d8be4a34b0f2a44a9d35989d2287 | /tests/unit/entity/test_user_groups_entity.py | d83bcc41f01048931032fe2204cd5fa53a0413ae | [] | no_license | scottwr98/pynifi-client | 9337a4f322536ee466d419a788b8b5948cdc62d7 | 013ac2ffa591284a0d6cbb9ed552681cc6f91165 | refs/heads/master | 2020-04-18T08:47:03.680749 | 2017-11-04T23:59:58 | 2017-11-04T23:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import pynifi_client
from pynifi_client.models.user_groups_entity import UserGroupsEntity # noqa: E501
from pynifi_client.rest import ApiException
class TestUserGroupsEntity(unittest.TestCase):
"""UserGroupsEntity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUserGroupsEntity(self):
"""Test UserGroupsEntity"""
# FIXME: construct object with mandatory attributes with example values
# model = pynifi_client.models.user_groups_entity.UserGroupsEntity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
9363a989c86865fd89d14d0fc1c01f2e8361c7b4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04011/s295435245.py | 4d2ae76e99072dfb9ca262f4ebf7f3e478f48296 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | N = int(input())
K = int(input())
X = int(input())
Y = int(input())
ans = 0
if N>=K:
ans = K*X+(N-K)*Y
if N<K:
ans = N*X
print(ans) | [
"[email protected]"
] | |
1da82694458a1675eda0715f585913a2ace1f065 | cd25757a1ce38f99534f8790e9d4359ab609fc17 | /build_index.py | f6d9ab58ca76b8c373d37eab4bd893fd9a161f81 | [] | no_license | daviddwlee84/SearchEngine | 64be99b2114364e8a0913a51d11215bb3c9806fa | 283d1db39900cddf3a2aad6141bd8c9f253a832a | refs/heads/master | 2023-03-16T17:55:22.135027 | 2021-02-24T09:49:54 | 2021-02-24T09:49:54 | 288,898,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,231 | py | # Build index for search models
import os
import sys
import pandas as pd
from tqdm import tqdm
import argparse
curr_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(curr_dir)
from search.annoy.build_index import AnnoyIndexBuilder
from search.elastic_search.build_index import ESIndexBuilder
class IndexBuilder(object):
def __init__(self, annoy_dir: str,
es_index: str, es_host: str,
ignore_ann: bool = False, ignore_es: bool = False):
self.do_ann = not ignore_ann
self.do_es = not ignore_es
if not ignore_ann:
# Note, currently ANN can only be build from scratch (can't add index after load)
# unless we store embedding
self.ann_builder = AnnoyIndexBuilder()
self.ann_dir = annoy_dir
if not ignore_es:
self.es_builder = ESIndexBuilder(host=es_host, index=es_index)
def initialize(self):
"""
Annoy: remove *.ann, mapping.json, *.pkl
ES : delete index
https://stackoverflow.com/questions/47087741/use-tqdm-progress-bar-with-pandas
"""
if self.do_ann:
self.ann_builder.remove_old_files(self.ann_dir)
if self.do_es:
self.es_builder.clear_old_index()
self.es_builder.create_index()
def build_indices_for_pandas_object(self, df: pd.DataFrame):
"""
TODO: dealing with NaN problem (especially pd.NaT in date)
(currently just ignore the date if NaT in elastic search index builder)
"""
for i, row in tqdm(df.iterrows(), total=len(df)):
if self.do_ann:
self.ann_builder.add_index_for_article(index=i, article=row)
if self.do_es:
self.es_builder.add_index_for_article(
index=i, article=dict(row))
def build_indices_for_json_file(self, json_file: str):
# TODO: load stuff and convert the data type, this is important if the memory is limited
pass
def finish(self):
if self.do_ann:
self.ann_builder.build_index()
self.ann_builder.save_index(self.ann_dir)
if self.do_es:
self.es_builder.finish_indexing()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--annoy-dir', type=str, default=os.path.join(curr_dir, 'index'),
help='Directory to place ANN models and related files.')
parser.add_argument('--es-host', type=str, default='http://stcadmin-dgx-station-002:9200',
help='Elastic search host address.')
parser.add_argument('--es-index', type=str, default='news',
help='Elastic search index to store')
parser.add_argument('--file', type=str, default=os.path.join(curr_dir, 'tools/Crawler/result/news/all_news.tsv'),
help='File to be parse and add')
parser.add_argument('--initialize', action='store_true',
help='Initialize elastic search records (be careful!) and remove annoy model (not necessary).')
parser.add_argument('--ignore-ann', action='store_true',
help='Do not built for ANN.')
parser.add_argument('--ignore-es', action='store_true',
help='Do not built for ES.')
return parser.parse_args()
# python3 SearchEngine/build_index.py --file parsed_tencent.tsv --ignore-ann --initialize
if __name__ == "__main__":
from utils.data_loader import load_tsv
args = parse_args()
builder = IndexBuilder(
annoy_dir=args.annoy_dir, es_host=args.es_host, es_index=args.es_index,
ignore_ann=args.ignore_ann, ignore_es=args.ignore_es)
if args.initialize:
print('Initializing checkpoints and elastic search data.')
builder.initialize()
if args.file.endswith('.tsv'):
df = load_tsv(args.file)
elif args.file.endswith('.json'):
from crawler.manager.combine_results import CombineResult
comb = CombineResult(simplify=True)
df = comb.load_from_json(args.file)
else:
print('Invalid file name', args.file)
exit()
builder.build_indices_for_pandas_object(df)
| [
"[email protected]"
] | |
c5b2f86108ef15f5f1ac362a8347eaf8587e4578 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_considerable.py | 90514ebc212bd524acaa3c6e06d566fdda7c1fb1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py |
#calss header
class _CONSIDERABLE():
def __init__(self,):
self.name = "CONSIDERABLE"
self.definitions = [u'large or of noticeable importance: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
227c65532562036f5e133f80f39c5b3e37744a30 | 0214ce4dd9c8973751120ced006ec90ddc10e0e6 | /xepmts_staging/models/inline_response20029.py | f37d1190293987f4bcc393b0a61685193252124f | [] | no_license | jmosbacher/pmts-staging-api-client | b9b4175a8ab52bd1c22a2845ab564cd0bd4d2e1c | d25cacc6c75b5d716414e08184c4a6bc205126f9 | refs/heads/master | 2022-11-08T09:18:38.371104 | 2020-07-01T14:52:46 | 2020-07-01T14:52:46 | 276,405,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py | # coding: utf-8
"""
PMT API
API for the XenonnT PMT database # noqa: E501
The version of the OpenAPI document: 0.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from xepmts_staging.configuration import Configuration
class InlineResponse20029(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'items': 'list[XenonntTpcPmt]'
}
attribute_map = {
'items': '_items'
}
def __init__(self, items=None, local_vars_configuration=None): # noqa: E501
"""InlineResponse20029 - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._items = None
self.discriminator = None
if items is not None:
self.items = items
@property
def items(self):
"""Gets the items of this InlineResponse20029. # noqa: E501
:return: The items of this InlineResponse20029. # noqa: E501
:rtype: list[XenonntTpcPmt]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this InlineResponse20029.
:param items: The items of this InlineResponse20029. # noqa: E501
:type: list[XenonntTpcPmt]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse20029):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InlineResponse20029):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
7b6f7484cb3c1c0a99d4139fa6e0a1b4a53cbb31 | 6452ffce36d1d50dbb27657398af4314ba73c0aa | /python/sqlite-benchmark-graph.py | 90612bef91573d686282d438380c1415e3b71cf0 | [] | no_license | laysakura/sqlite3_benchmark | 1bde4f37be88e20d8a7a385ab897bfe571f7ce3b | f125db9466f9467b7fbd877285e8bd2669fe5346 | refs/heads/master | 2016-09-06T09:06:43.350515 | 2012-11-20T12:55:22 | 2012-11-20T12:55:22 | 6,775,963 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,324 | py | #!/usr/bin/env python
import smart_gnuplotter
g = smart_gnuplotter.smart_gnuplotter()
import Config
import Util
def get_graph_file_name(var_graph_file_params):
ret = ""
for key in var_graph_file_params.keys():
ret += "%(key)s_%%(%(key)s)s--" % {"key": key}
return ret[:len(ret) - len("--")]
def get_title_from_var_params(var_params):
ret = ""
for key in var_params.keys():
ret += "%(key)s='%%(%(key)s)s' ; " % {"key": key}
return ret[:len(ret) - len(" ; ")]
def _get_var_graph_file_param_names():
(stdout_str, stderr_str) = Util.sh_cmd_sync(
"(cd %s/make ; make --quiet show_var_graph_file_params)" %
(Config.basedir))
return stdout_str.split()
def _get_var_plot_param_names():
(stdout_str, stderr_str) = Util.sh_cmd_sync(
"(cd %s/make ; make --quiet show_var_plot_params)" %
(Config.basedir))
return stdout_str.split()
def _get_param_keyvals(param_names):
ret = {}
for key in param_names:
value = g.do_sql(
Config.resultsDbPath,
"select distinct " + key + " from " + Config.resultsDbTable + ";",
single_col=1)
ret[key] = value
return ret
def get_var_graph_file_params():
param_names = _get_var_graph_file_param_names()
return _get_param_keyvals(param_names)
def get_var_plot_params():
param_names = _get_var_plot_param_names()
return _get_param_keyvals(param_names)
def get_where_clause(var_graph_file_params, var_plot_params):
ret = ""
for g_param in var_graph_file_params:
ret += "%(g_param)s='%%(%(g_param)s)s' and " % {"g_param": g_param}
for p_param in var_plot_params:
ret += "%(p_param)s='%%(%(p_param)s)s' and " % {"p_param": p_param}
return ret[:len(ret) - len("and ")]
def get_temp_table_sql():
return (
"""
-- Write `create temp table tmp_T0 ...'
"""
)
def plot(var_graph_file_params, var_plot_params):
## Temp table definition
init = get_temp_table_sql()
w = get_where_clause(var_graph_file_params, var_plot_params)
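    # Average and stdev of real_time per SQL statement, restricted to the fixed parameters.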
query = (
"select 'SQL'||sql_no, avg(real_time), stdev(real_time)" +
" from " + Config.resultsDbTable +
" where " + w +
" group by sql_no;"
)
vars_dict = var_graph_file_params.copy()
vars_dict.update(var_plot_params)
g.graphs(
(Config.resultsDbPath, query, init),
terminal=Config.graphTerminal,
output="%s/resultsGraph/%s" % (
Config.basedir,
get_graph_file_name(var_graph_file_params)),
graph_attr="""
set style fill solid 1.00 border 0
set style histogram errorbars gap 2 lw 1
set style data histogram
set xtics rotate by -45
set grid ytics
""",
graph_title=get_title_from_var_params(var_graph_file_params),
plot_title=get_title_from_var_params(var_plot_params),
using="2:3",
yrange="[0:]",
xlabel=Config.graphXlabel,
ylabel=Config.graphYlabel,
vars_dict=vars_dict,
graph_vars=var_graph_file_params.keys(),
)
def main():
## Get appropreate graph variable
var_graph_file_params = get_var_graph_file_params()
var_plot_params = get_var_plot_params()
## Elapsed time
plot(var_graph_file_params, var_plot_params)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
a003bcb38318a40c71739f4d1552601723b08b17 | 11aaeaeb55d587a950456fd1480063e1aed1d9e5 | /.history/test_20190626132733.py | 79accd38ca746ea23e96e964bef94a8f31ed415e | [] | no_license | Gr4cchus/Learn-Python-3-The-Hard-Way | 8ce9e68f6a91ea33ea45fe64bfff82d65422c4a8 | f5fa34db16cdd6377faa7fcf45c70f94bb4aec0d | refs/heads/master | 2020-05-17T23:18:29.483160 | 2019-06-26T18:42:52 | 2019-06-26T18:42:52 | 184,023,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,126 | py | # # class Foo:
# # answer = 42
# # f1 = Foo()
# # f2 = Foo()
# # print(f1.answer)
# # print(f2.answer)
# # # both will print 42
# # f1.answer = 84
# # Foo.answer = 21
# # print(f1.answer) # 84
# # print(f2.answer) # 21
# class Foo:
# def __init__(self):
# self.answer = 42
# f1 = Foo()
# f2 = Foo()
# # f2.answer = 4000
# Foo.answer = 21
# # f1.answer = 2000
# print(f1.answer)
# print(f2.answer)
# # both will print 42 still
class Scenes(object):
# def __init__(self):
# # self.starting_room = starting_room
# # self.locations = {
# # 'room1': Room1(),
# # 'room2': Room2()
# # }
map_list = [
'room1',
'room2',
'finish'
]
def start(self):
print("You are at the start")
print("Where would you like to go")
self.locations()
def room1(self):
print("You enter room 1")
print("Where would you like to go")
self.locations()
def room2(self):
print("You enter room 2")
print("Where would you like to go")
self.locations()
def finish(self):
print("You have finished")
exit(0)
def locations(self):
print("def locations:", self.map_list)
for i in self.map_list:
print(i)
    cmd = {
        'room1': room1,
        'room2': room2,
        'finish': finish,
    }
def guessing_game(self):
print("Oh no a mini-game")
# class Map(Scenes):
# a = Scenes()
# map_dict = {
# 'room1': a.room1(),
# 'room2': a.room2(),
# }
# class Engine():
# def __init__(self, map):
# self.map = map
# def play(self):
# while True:
# # a = self.map.dict_locations
# print('yes')
thescenes = Scenes()
# thelocations = Locations()
# thedict = thelocations.map()
# while True:
# print("loop")
# thelocations.map.dict_locations.get('room1')
thescenes.start()
while True:
action = input("> ")
    if action in thescenes.cmd:
        print("success")
        thescenes.cmd[action](thescenes)  # cmd maps names to unbound methods; pass the instance explicitly
| [
"[email protected]"
] | |
079a78638966c854c7a692303e50cb2a90e5ee38 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02847/s323542817.py | e4b56ed542d286d18994ff8e3ae443092f5f3288 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | from sys import stdin, setrecursionlimit
WEEK = {
'SUN': 0,
'MON': 1,
'TUE': 2,
'WED': 3,
'THU': 4,
'FRI': 5,
'SAT': 6
}
def main():
input = stdin.buffer.readline
s = input()[:-1].decode()
print(7 - WEEK[s])
if __name__ == "__main__":
setrecursionlimit(10000)
main()
| [
"[email protected]"
] | |
e8390081053f84852515f18c5edfb636621d94b6 | 20c20938e201a0834ccf8b5f2eb5d570d407ad15 | /dp/dp_d/12309481.py | 505ec598e3b892ba10e55f5a4b003c8f412ac4cc | [] | no_license | kouhei-k/atcoder_submissions | 8e1a1fb30c38e0d443b585a27c6d134bf1af610a | 584b4fd842ccfabb16200998fe6652f018edbfc5 | refs/heads/master | 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | def main():
N, W = map(int, input().split())
wv = [tuple(map(int, input().split())) for i in range(N)]
dp = [-1]*(W+1)
dp[0] = 0
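    # dp[j] = best value with total weight exactly j; -1 marks unreachable weights.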
for w, v in wv:
for j in range(W-1, -1, -1):
if dp[j] >= 0 and j+w <= W:
dp[j+w] = max(dp[j+w], dp[j] + v)
print(max(dp))
main()
| [
"[email protected]"
] | |
d60ad3880d7c6e574a14889e96134d03ea0cf5a7 | 54fdaa05078261180cbd7cc94c132527725b189d | /test/crab_ElectronPlots_newskim_eraBv2.py | 3669634ae8acc57d136ad537adca62dd18a27724 | [] | no_license | psiddire/ZeeAnalyzer | e488d3b65108ca923bd459cda41e61f3bd746a5b | d94b1fd4f4de19f5cdeaf405e4c0d6629b889888 | refs/heads/master | 2021-09-07T12:20:36.554253 | 2018-02-22T18:31:52 | 2018-02-22T18:31:52 | 113,574,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | # from https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'plots_Zee_newskim_eraBv2'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runElectronPlots_newSkim_v2.py'
config.Data.inputDataset = '/DoubleEG/Run2017B-PromptReco-v2/MINIAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 1000
config.Data.lumiMask = 'NewJson.txt'
config.Data.runRange = '297050-299329'
#config.Data.totalUnits = 1
config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = True
config.Data.outputDatasetTag = 'Zee_ElectronPlots_newskim_eraBv2'
config.Site.storageSite = 'T2_CH_CERN'
#all the configuration parameters https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3ConfigurationFile
#all crab commands https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3Commands
| [
"[email protected]"
] | |
4cf9b4ddf5b75a0c24363f4cabbb4a2c429cd06e | 1d9e681b204e6ec2d7a710ef45b7dec082239491 | /venv/Lib/site-packages/od_python/models/inline_response_200_23.py | be19e551a11c272b3e2fa1c510c9a94e50aeca25 | [] | no_license | 1chimaruGin/DotaAnalysis | 0e0b85805cc83e4cc491d46f7eadc014e8d6b1f1 | 6a74cde2ee400fc0dc96305203d60c5e56d7ecff | refs/heads/master | 2020-07-21T20:48:07.589295 | 2019-09-07T12:20:15 | 2019-09-07T12:20:15 | 206,972,180 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,972 | py | # coding: utf-8
"""
OpenDota API
    # Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. Please keep request rate to approximately 1/s. **Beginning 4/22/2018, the OpenDota API will be limited to 50,000 free calls per month.** We'll be offering a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more.  # noqa: E501
OpenAPI spec version: 17.6.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse20023(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'duration_bin': 'str',
'games_played': 'int',
'wins': 'int'
}
attribute_map = {
'duration_bin': 'duration_bin',
'games_played': 'games_played',
'wins': 'wins'
}
def __init__(self, duration_bin=None, games_played=None, wins=None):
"""
InlineResponse20023 - a model defined in Swagger
"""
self._duration_bin = None
self._games_played = None
self._wins = None
if duration_bin is not None:
self.duration_bin = duration_bin
if games_played is not None:
self.games_played = games_played
if wins is not None:
self.wins = wins
@property
def duration_bin(self):
"""
Gets the duration_bin of this InlineResponse20023.
Lower bound of number of seconds the match lasted
:return: The duration_bin of this InlineResponse20023.
:rtype: str
"""
return self._duration_bin
@duration_bin.setter
def duration_bin(self, duration_bin):
"""
Sets the duration_bin of this InlineResponse20023.
Lower bound of number of seconds the match lasted
:param duration_bin: The duration_bin of this InlineResponse20023.
:type: str
"""
self._duration_bin = duration_bin
@property
def games_played(self):
"""
Gets the games_played of this InlineResponse20023.
Number of games played
:return: The games_played of this InlineResponse20023.
:rtype: int
"""
return self._games_played
@games_played.setter
def games_played(self, games_played):
"""
Sets the games_played of this InlineResponse20023.
Number of games played
:param games_played: The games_played of this InlineResponse20023.
:type: int
"""
self._games_played = games_played
@property
def wins(self):
"""
Gets the wins of this InlineResponse20023.
Number of wins
:return: The wins of this InlineResponse20023.
:rtype: int
"""
return self._wins
@wins.setter
def wins(self, wins):
"""
Sets the wins of this InlineResponse20023.
Number of wins
:param wins: The wins of this InlineResponse20023.
:type: int
"""
self._wins = wins
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, InlineResponse20023):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
c7bc15d72bb90e5fb7c6d3a794bcdc3c286ee53a | a3926c09872e1f74b57431fbb3e711918a11dc0a | /python/hash_table/1346_check_if_n_and_its_double_exist.py | a5c2c203328d0dbf68510dd50017a7aac7c37ec2 | [
"MIT"
] | permissive | linshaoyong/leetcode | e64297dc6afcebcee0614a153a566323bf223779 | 57080da5fbe5d62cbc0b8a34e362a8b0978d5b59 | refs/heads/main | 2022-09-15T00:05:36.476268 | 2022-08-16T14:09:11 | 2022-08-16T14:09:11 | 196,914,051 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | class Solution(object):
def checkIfExist(self, arr):
"""
:type arr: List[int]
:rtype: bool
"""
sa = set()
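        # For each element, check whether its double or its half was seen earlier.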
for a in arr:
if 2 * a in sa or (a % 2 == 0 and a // 2 in sa):
return True
sa.add(a)
return False
def test_check_if_exist():
s = Solution()
assert s.checkIfExist([10, 2, 5, 3])
assert s.checkIfExist([7, 1, 14, 11])
assert s.checkIfExist([3, 1, 7, 11]) is False
assert s.checkIfExist([-2, 0, 10, -19, 4, 6, -8]) is False
assert s.checkIfExist([0, 0])
| [
"[email protected]"
] | |
50a7a10cb9a1aa88a71c786a4b06da91c96801bc | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/anagram/7a7f1153e39747b2922e1c830c65ac0a.py | 23159322f0bf7864d05c50ccb0628b12b1aae17c | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 246 | py | def detect_anagrams(word, possibles):
found = []
for possible in possibles:
if sorted(list(word.lower())) == sorted(list(possible.lower())) and word.lower() != possible.lower():
found.append(possible)
return found
| [
"[email protected]"
] | |
528c4a1f5896ec1fbbc0e2b833b6a51ca7381b80 | 17cbe826892d06dc5aee4e4c2a5747e10933f2d0 | /hmtl/modules/text_field_embedders/__init__.py | 233345c6669cbadde24653011c00aa7013aa3810 | [
"MIT"
] | permissive | rahular/joint-coref-srl | 3fdd0e37a56e3be894f3da4ceeb030a599ff4388 | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | refs/heads/main | 2023-02-16T21:53:11.721014 | 2021-01-18T15:31:47 | 2021-01-18T15:31:47 | 330,708,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | # coding: utf-8
from hmtl.modules.text_field_embedders.shortcut_connect_text_field_embedder import (
ShortcutConnectTextFieldEmbedder,
)
| [
"[email protected]"
] | |
96fd35a1314f73aa37ac76aef4bb32df8cc4fe3a | cd052f960846ea33e22abdded3106fb492f16c31 | /爬虫项目/code09/ITCast/ITCast/spiders/itcast.py | ed76197004fcb89ff0c9826496a95830942d4f4d | [] | no_license | byst4nder/his_spider | 2d96457b70894c36506e8061d8a3201ac337a5d0 | a51e31acff41292e568ac22b0e213e6cb48218fa | refs/heads/master | 2020-07-21T12:06:28.952083 | 2019-09-06T14:25:58 | 2019-09-06T14:25:58 | 206,857,595 | 1 | 0 | null | 2019-09-06T19:04:02 | 2019-09-06T19:04:02 | null | UTF-8 | Python | false | false | 1,322 | py | # coding:utf-8
# 可以通过命令创建爬虫
# #scrapy genspider itcast itcast.cn
import scrapy
from ..items import ItcastItem
class ItcastSpider(scrapy.Spider):
name = "itcast"
allowed_domains = ["itcast.cn"]
start_urls = ["http://www.itcast.cn/channel/teacher.shtml"]
def parse(self, response):
node_list = response.xpath("//div[@class='li_txt']")
        # iterate over each teacher block and save its fields in an item
for node in node_list:
item = ItcastItem()
item['name'] = node.xpath("./h3/text()").extract_first()
item['title'] = node.xpath("./h4/text()").extract_first()
item['info'] = node.xpath("./p/text()").extract_first()
yield item
        # 1. scrapy crawl itcast -o itcast.json (also csv, xml, jl)
        # 2. To store the data in a format scrapy does not support out of the box
        #    (e.g. a database), it must go through an item pipeline.
#engine.py
# Each for-iteration over parse() in the engine handles the data (requests, items)
# extracted from one response:
# for result in spider.parse(response):
# if isinstance(result, scrapy.Item):
# pipeline.process_item(resutl, spider)
# elif isinstance(result, scrapy.Request):
# scheduler.add_request(result)
| [
"[email protected]"
] | |
1f07585f8b5bd8b97955c465f76f0b70ac4458b1 | 19be6560901ac2d1c2c1cfa307adb84295e58a9e | /backoffice/urls.py | 2e272952075115a11bb1b203fa20a5f776dcfd7c | [] | no_license | Neutrinet/backoffice | ebacf44cf9f7c7581a593a6986f1e83c2cfe2591 | cb87571a87a4f6fec54d47095e454080cf6fbe5c | refs/heads/master | 2023-04-20T09:18:31.755593 | 2023-03-30T20:54:49 | 2023-03-30T20:54:49 | 36,190,840 | 1 | 0 | null | 2017-12-11T18:13:23 | 2015-05-24T20:10:06 | Python | UTF-8 | Python | false | false | 349 | py | from members.views import ffdn_api
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^admin2/', include('admin2.urls')),
url(r'^', include('ordering.urls')),
url(r'^accounts/', include('accounts.urls')),
url(r'^isp.json$', ffdn_api),
]
| [
"[email protected]"
] | |
d5c8d40acc3d63d3f90c826b6f55231a1363ca22 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_september.py | 50ec52f86c7a3f910be158d3734960cae05fab5d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py |
#calss header
class _SEPTEMBER():
def __init__(self,):
self.name = "SEPTEMBER"
self.definitions = [u'the ninth month of the year, after August and before October: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
26b37fb1f6de95098a60273063f8127dfa9cd6ee | 9e335834e7be81068f001d5451781d5c1530ebbf | /LearnPythonTHW/ex15.py | bf759b93985340432ebf224960e5e9315db9f325 | [] | no_license | jtr109/SelfLearning | c1dbffa5485d0cd2f444ea510da62a8e3d269dbc | cc920ed507647762b9855385be76869adac89e7c | refs/heads/master | 2020-04-06T04:11:31.143688 | 2016-07-22T02:19:39 | 2016-07-22T02:19:39 | 58,049,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # -*- coding: utf-8 -*-
from sys import argv
script, filename = argv
txt = open(filename)
print "Here's your file %r:" % filename
print txt.read()
print "Type the filename again:"
file_again = raw_input("> ")
txt_again = open(file_again)
print txt_again.read()
| [
"[email protected]"
] | |
eb5f2cf86ec088fe0044bbd729282c46ce185b5e | eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd | /homeassistant/components/freedompro/light.py | 95731da914e47175459ea54202f980d23b6451cc | [
"Apache-2.0"
] | permissive | JeffLIrion/home-assistant | 53966b81b5d5816679f12fc761f79e8777c738d6 | 8f4ec89be6c2505d8a59eee44de335abe308ac9f | refs/heads/dev | 2023-08-22T09:42:02.399277 | 2022-02-16T01:26:13 | 2022-02-16T01:26:13 | 136,679,169 | 5 | 2 | Apache-2.0 | 2023-09-13T06:59:25 | 2018-06-09T00:58:35 | Python | UTF-8 | Python | false | false | 4,138 | py | """Support for Freedompro light."""
import json
from pyfreedompro import put_state
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Freedompro light."""
api_key = entry.data[CONF_API_KEY]
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
Device(hass, api_key, device, coordinator)
for device in coordinator.data
if device["type"] == "lightbulb"
)
class Device(CoordinatorEntity, LightEntity):
"""Representation of an Freedompro light."""
def __init__(self, hass, api_key, device, coordinator):
"""Initialize the Freedompro light."""
super().__init__(coordinator)
self._session = aiohttp_client.async_get_clientsession(hass)
self._api_key = api_key
self._attr_name = device["name"]
self._attr_unique_id = device["uid"]
self._attr_device_info = DeviceInfo(
identifiers={
(DOMAIN, self.unique_id),
},
manufacturer="Freedompro",
model=device["type"],
name=self.name,
)
self._attr_is_on = False
self._attr_brightness = 0
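        # Pick the richest color mode the device's characteristics advertise.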
color_mode = COLOR_MODE_ONOFF
if "hue" in device["characteristics"]:
color_mode = COLOR_MODE_HS
elif "brightness" in device["characteristics"]:
color_mode = COLOR_MODE_BRIGHTNESS
self._attr_color_mode = color_mode
self._attr_supported_color_modes = {color_mode}
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
device = next(
(
device
for device in self.coordinator.data
if device["uid"] == self._attr_unique_id
),
None,
)
if device is not None and "state" in device:
state = device["state"]
if "on" in state:
self._attr_is_on = state["on"]
if "brightness" in state:
self._attr_brightness = round(state["brightness"] / 100 * 255)
if "hue" in state and "saturation" in state:
self._attr_hs_color = (state["hue"], state["saturation"])
super()._handle_coordinator_update()
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
async def async_turn_on(self, **kwargs):
"""Async function to set on to light."""
payload = {"on": True}
if ATTR_BRIGHTNESS in kwargs:
payload["brightness"] = round(kwargs[ATTR_BRIGHTNESS] / 255 * 100)
if ATTR_HS_COLOR in kwargs:
payload["saturation"] = round(kwargs[ATTR_HS_COLOR][1])
payload["hue"] = round(kwargs[ATTR_HS_COLOR][0])
payload = json.dumps(payload)
await put_state(
self._session,
self._api_key,
self._attr_unique_id,
payload,
)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs):
"""Async function to set off to light."""
payload = {"on": False}
payload = json.dumps(payload)
await put_state(
self._session,
self._api_key,
self._attr_unique_id,
payload,
)
await self.coordinator.async_request_refresh()
| [
"[email protected]"
] | |
00796d3b3a4472968a31b50cfda2cb973bf04186 | 2b08e2af586db3b290773bf579ba243962b5e7d5 | /interactivo.py | d1758cd6945e8311071ff7ed59a2a6a6013fd7a5 | [] | no_license | schiob/python-ciencias-basicas | e7fa4332e3038993c81388272280c4da90812959 | 433a210f1a80ecdbd6a70df468d9e579ff26df7e | refs/heads/main | 2023-06-05T05:10:15.130329 | 2021-06-24T00:20:55 | 2021-06-24T00:20:55 | 378,769,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
# The parametrized function to be plotted
def f(t, amplitude, frequency):
return amplitude * np.sin(2 * np.pi * frequency * t)
t = np.linspace(0, 1, 1000)
# Define initial parameters
init_amplitude = 5
init_frequency = 3
# Create the figure and the line that we will manipulate
fig, ax = plt.subplots()
line, = plt.plot(t, f(t, init_amplitude, init_frequency), lw=2)
ax.set_xlabel('Time [s]')
axcolor = 'lightgoldenrodyellow'
ax.margins(x=0)
# adjust the main plot to make room for the sliders
plt.subplots_adjust(left=0.25, bottom=0.25)
# Make a horizontal slider to control the frequency.
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
freq_slider = Slider(
ax=axfreq,
label='Frequency [Hz]',
valmin=0.1,
valmax=30,
valinit=init_frequency,
)
# Make a vertically oriented slider to control the amplitude
axamp = plt.axes([0.1, 0.25, 0.0225, 0.63], facecolor=axcolor)
amp_slider = Slider(
ax=axamp,
label="Amplitude",
valmin=0,
valmax=10,
valinit=init_amplitude,
orientation="vertical"
)
# The function to be called anytime a slider's value changes
def update(val):
line.set_ydata(f(t, amp_slider.val, freq_slider.val))
fig.canvas.draw_idle()
# register the update function with each slider
freq_slider.on_changed(update)
amp_slider.on_changed(update)
# Create a `matplotlib.widgets.Button` to reset the sliders to initial values.
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
freq_slider.reset()
amp_slider.reset()
button.on_clicked(reset)
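# Sliders can also be driven programmatically: Slider.set_val(v) moves the
# handle and fires the registered callback, e.g. freq_slider.set_val(10.0).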
plt.show() | [
"[email protected]"
] | |
e4a39f7dc670a9334da406a630aee065d7152554 | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OTLModel/Datatypes/KlAlgSnelheidsregime.py | 421b590ae8912821efe765805334d2de3ff76636 | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 3,552 | py | # coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlAlgSnelheidsregime(KeuzelijstField):
"""De snelheidsregimes met variabele mogelijkeid."""
naam = 'KlAlgSnelheidsregime'
label = 'Snelheidsregime'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlAlgSnelheidsregime'
definition = 'De snelheidsregimes met variabele mogelijkeid.'
status = 'ingebruik'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlAlgSnelheidsregime'
options = {
'120': KeuzelijstWaarde(invulwaarde='120',
label='120',
status='ingebruik',
definitie='120 km/h.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/120'),
'30': KeuzelijstWaarde(invulwaarde='30',
label='30',
status='ingebruik',
definitie='30 km/h.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/30'),
'50': KeuzelijstWaarde(invulwaarde='50',
label='50',
status='ingebruik',
definitie='50 km/h.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/50'),
'60': KeuzelijstWaarde(invulwaarde='60',
label='60',
status='ingebruik',
definitie='60 km/h.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/60'),
'70': KeuzelijstWaarde(invulwaarde='70',
label='70',
status='ingebruik',
definitie='70 km/h.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/70'),
'80': KeuzelijstWaarde(invulwaarde='80',
label='80',
status='ingebruik',
definitie='80 km/h.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/80'),
'90': KeuzelijstWaarde(invulwaarde='90',
label='90',
status='ingebruik',
definitie='90 km/h.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/90'),
'variabel': KeuzelijstWaarde(invulwaarde='variabel',
label='variabel',
status='ingebruik',
definitie='Variabele ingave.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlAlgSnelheidsregime/variabel')
}
@classmethod
def create_dummy_data(cls):
return random.choice(list(map(lambda x: x.invulwaarde,
filter(lambda option: option.status == 'ingebruik', cls.options.values()))))
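# Illustrative use: KlAlgSnelheidsregime.create_dummy_data() returns a random
# in-use ('ingebruik') value such as '70' or 'variabel'.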
| [
"[email protected]"
] | |
b6c35090a87a08f91a9ef3303b9b4a5b23fcbb98 | a5aabe2e4057d78e687a57a6b560516a7cdb5836 | /tests/extreme/clustering/test_tman.py | c4cab7f0ccaa18b6d758bf361d21d8e50575424a | [
"MIT"
] | permissive | aratz-lasa/py-unsserv | 0ffc09ddab65a11ce917d0faa8b1b5dff091e563 | 6f332385e55d05953186b9a8b7848bca4b878e18 | refs/heads/master | 2022-12-14T21:10:12.397834 | 2020-05-03T11:29:49 | 2020-05-03T11:29:49 | 228,329,158 | 5 | 0 | MIT | 2022-12-08T07:00:55 | 2019-12-16T07:35:20 | Python | UTF-8 | Python | false | false | 4,202 | py | import asyncio
from collections import Counter
from functools import partial
from math import ceil
import pytest
from tests.utils import init_extreme_membership
from unsserv.common.gossip.config import GossipConfig
from unsserv.common.structs import Node
from unsserv.extreme.clustering.t_man import TMan
init_extreme_membership = init_extreme_membership # for flake8 compliance
CLUSTERING_SERVICE_ID = "tman"
@pytest.mark.asyncio
@pytest.fixture
async def init_tman():
tman = None
r_tmans = []
async def _init_tman(newc, r_newcs):
nonlocal tman, r_tmans
tman = TMan(newc)
await tman.join(
CLUSTERING_SERVICE_ID, ranking_function=partial(port_distance, tman.my_node)
)
for r_newc in r_newcs:
r_tman = TMan(r_newc)
await r_tman.join(
CLUSTERING_SERVICE_ID,
ranking_function=partial(port_distance, r_tman.my_node),
)
r_tmans.append(r_tman)
return tman, r_tmans
try:
yield _init_tman
finally:
await tman.leave()
for r_tman in r_tmans:
await r_tman.leave()
def port_distance(my_node: Node, ranked_node: Node):
return abs(my_node.address_info[1] - ranked_node.address_info[1])
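# e.g. nodes listening on ports 7000 and 7003 rank at distance 3, so T-Man
# keeps the numerically closest ports as cluster neighbours.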
@pytest.mark.asyncio
@pytest.mark.parametrize(
"amount",
[GossipConfig.LOCAL_VIEW_SIZE * 2 + 1, GossipConfig.LOCAL_VIEW_SIZE * 2 + 5, 100],
)
async def test_join_tman(init_extreme_membership, init_tman, amount):
newc, r_newcs = await init_extreme_membership(amount)
tman, r_tmans = await init_tman(newc, r_newcs)
await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 7)
await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 45)
cluster_nodes = [tman] + r_tmans
satisfy_ideal_neighbours = []
for cluster in cluster_nodes:
neighbours = set(cluster.get_neighbours())
key_function = partial(port_distance, cluster.my_node)
ideal_neighbours = set(
sorted(map(lambda c_n: c_n.my_node, cluster_nodes), key=key_function)[
1 : GossipConfig.LOCAL_VIEW_SIZE + 1
]
)
satisfies_half_ideal_neighbours = min(
amount, GossipConfig.LOCAL_VIEW_SIZE
) * 0.5 <= len(ideal_neighbours.intersection(neighbours))
satisfy_ideal_neighbours.append(satisfies_half_ideal_neighbours)
assert sum(satisfy_ideal_neighbours) / (amount + 1) >= 0.5
@pytest.mark.asyncio
@pytest.mark.parametrize(
"amount",
[
GossipConfig.LOCAL_VIEW_SIZE + 1,
GossipConfig.LOCAL_VIEW_SIZE + 5,
GossipConfig.LOCAL_VIEW_SIZE + 100,
],
)
async def test_leave_tman(init_extreme_membership, init_tman, amount):
newc, r_newcs = await init_extreme_membership(amount)
tman, r_tmans = await init_tman(newc, r_newcs)
await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 7)
await tman.leave()
await newc.leave()
await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 40)
all_nodes = Counter(
[
item
for sublist in map(lambda n: n.get_neighbours(), r_tmans)
for item in sublist
]
)
nodes_ten_percent = ceil(amount * 0.2)
assert newc.my_node not in all_nodes.keys() or newc.my_node in set(
map(lambda p: p[0], all_nodes.most_common()[-nodes_ten_percent:])
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"amount",
[
(GossipConfig.LOCAL_VIEW_SIZE * 2) + 1,
(GossipConfig.LOCAL_VIEW_SIZE * 2) + 5,
(GossipConfig.LOCAL_VIEW_SIZE * 2) + 100,
],
) # very high neighbours amount,
# to assure neighbours will change, because it is initailzied by Newscast
async def test_tman_handler(init_extreme_membership, init_tman, amount):
newc, r_newcs = await init_extreme_membership(amount)
tman, r_tmans = await init_tman(newc, r_newcs)
handler_event = asyncio.Event()
async def handler(local_view):
assert isinstance(local_view, list)
nonlocal handler_event
handler_event.set()
tman.add_neighbours_handler(handler)
await asyncio.sleep(GossipConfig.GOSSIPING_FREQUENCY * 15)
assert handler_event.is_set()
| [
"[email protected]"
] | |
8b8cc3dcee06ab2783d556bc60df2a47668c5d00 | ffcce7bc3d82f19a2e024549f9fe3cd8e8702203 | /examples/other/animation2.py | 465af025dde5cf668f825fb115e5caad8b1f804a | [
"MIT"
] | permissive | jlqzzz/vtkplotter | 97f122e533b7f7d2dae1d7523d96326fbe5b8b60 | 6d28cb79153ddef29bc7b0bd19ddde655dcc392c | refs/heads/master | 2022-03-25T03:15:44.487184 | 2019-12-02T18:50:10 | 2019-12-02T18:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | """
This example shows how to animate simultaneously various objects
by specifying event times and durations of the visual effects.
"""
from vtkplotter import *
s = load(datadir+"bunny.obj").subdivide().normalize()
vp = Animation()
vp.timeResolution = 0.02 # secs
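# (i.e. the timeline is sampled every 0.02 s -- about 50 animation frames per
# second, assuming timeResolution is the timeline sampling step)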
vp.switchOn(s)
# no need to repeat t=1, duration=3 in changeLighting and changeColor
vp.meshErode(corner=0, t=1, duration=3).changeLighting("glossy").changeColor("v")
cam1 = orientedCamera(backoffVector=(0, 0, -1), backoff=8)
cam2 = orientedCamera(backoffVector=(1, 1, 1), backoff=8)
vp.moveCamera(cam1, cam2, t=0, duration=4)
vp.play()
| [
"[email protected]"
] | |
fe34abe7832b4c957422ee6ce3e3eb4df632a86d | ebcc40516adba151e6a1c772223b0726899a26eb | /tests/io_/test_versions.py | 462e87461b8534cdaa4886943c80729df6647e0a | [
"MIT"
] | permissive | spacetx/slicedimage | acf4a767f87b6ab78e657d85efad22ee241939f4 | eb8e1d3899628db66cffed1370f2a7e6dd729c4f | refs/heads/master | 2021-04-09T10:53:15.057821 | 2020-05-26T17:40:11 | 2020-05-26T17:40:11 | 125,316,414 | 7 | 4 | MIT | 2020-05-26T17:40:15 | 2018-03-15T05:24:24 | Python | UTF-8 | Python | false | false | 374 | py | from packaging import version
from slicedimage import VERSIONS
def test_version_increasing_order():
"""Verifies that the VERSIONS list is in increasing order."""
for ix in range(1, len(VERSIONS)):
prev_version = VERSIONS[ix - 1]
curr_version = VERSIONS[ix]
assert version.parse(prev_version.VERSION) < version.parse(curr_version.VERSION)
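# packaging compares versions semantically rather than lexically, e.g.
# version.parse("0.9") < version.parse("0.10") holds while "0.9" < "0.10" is False.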
| [
"[email protected]"
] | |
6ccd00459fa87e1a94f5758411d0bbdb9aec6367 | a884039e1a8b0ab516b80c2186e0e3bad28d5147 | /Livros/Livro-Desenvolvimento web com Flask/Capitulo04/Nível 01/exemplo4_3.py | a387fdd0382fcffe143985b37b25f2d588a471a9 | [
"MIT"
] | permissive | ramonvaleriano/python- | 6e744e8bcd58d07f05cd31d42a5092e58091e9f0 | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | refs/heads/main | 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | # Program: exemplo4_3.py
# Author: Ramon R. Valeriano
# Description: Working through the programs of Chapter 04, level 01
# Developed: 09/03/2020 - 14:48
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
app = Flask(__name__)
app.config['SECRET_KEY'] = 'testandoaplicacao'
bootstrap = Bootstrap(app)
moment = Moment(app)
class NameForm(FlaskForm):
name = StringField('Qual é seu nome?', validators=[DataRequired()])
submit = SubmitField('Submeter')
@app.route('/', methods=['GET', 'POST'])
def index():
name = None
form = NameForm()
if form.validate_on_submit():
name = form.name.data
form.name.data = ''
return render_template('indexInicial1.html', form=form, name=name)
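# Note: form.validate_on_submit() is True only for a POST whose validators all
# pass; a plain GET (or an empty name, rejected by DataRequired) falls through
# and re-renders the form with name=None.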
@app.route('/user/<name>')
def user(name):
return render_template('userInicial.html', name=name)
@app.errorhandler(404)
def pagina_nao_encontrada(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def erro_servidor(e):
return render_template('500.html'), 500
app.run(debug=True) | [
"[email protected]"
] | |
4a0cf341c54b5e86925e1e4083c443883922d80b | 23e40d2056bcadb28aad88ef58efb95461516072 | /main/settings.py | aab3cf8e575662e3a8938c00d1089bb561948ca2 | [] | no_license | drewvpham/dojo_secrets | 6436335005373ab93e3dd58e5943b918313f3fa9 | 120016c86d17643c59f6e18aff84d5ccb84b52b0 | refs/heads/master | 2021-01-22T04:15:15.385267 | 2017-05-30T21:47:34 | 2017-05-30T21:47:34 | 92,445,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's(u^ipke+4s%dj7x@pe0jk1i7)bki!#%n)0&=nl3$f0y6*tks0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.dojo_secrets',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
6d2cce74c330a140309c8fdbe07a96d959916b66 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg_old/scg_obj_shell_2nd/test_c40407.py | bc6d1b372424eea87293ec88ccf7c74bca513fd8 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | import pytest
import time
import sys
from page_obj.scg.scg_def import *
from page_obj.scg.scg_def_obj import *
from page_obj.scg.scg_def_log import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_acl import *
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = 40407
# Add a weekly schedule that contains one basic schedule, then check the log
def test_obj_wxw(browser):
try:
login_web(browser, url="10.2.2.81")
        # Switch to the default frame
browser.switch_to.default_content()
        # Switch to the left-side tree frame
browser.switch_to.frame("lefttree")
        # Click "Objects"
browser.find_element_by_xpath(对象).click()
        # Click "Schedules"
browser.find_element_by_xpath(计划任务).click()
        # Click "Basic schedule"
browser.find_element_by_xpath('//*[@id="menu"]/div[4]/div/ul/li[5]/ul/li[1]/span/a/span').click()
add_obj_schdule_wxw(browser, name='schdule_407', desc='描述', recurring='yes', fromtime='01:00', totime='02:00')
        # Switch to the default frame
browser.switch_to.default_content()
        # Switch to the left-side tree frame
browser.switch_to.frame("lefttree")
        # Click "Schedules"
browser.find_element_by_xpath(计划任务).click()
        # Click "Weekly schedule"
browser.find_element_by_xpath(周计划任务).click()
add_obj_weekly_schdule_wxw(browser, name='week_schd_407', desc='miaoshu',
monday='yes', schdule1='schdule_407',
tuesday='', schdule2='',
wednesday='', schdule3='',
thursday='', schdule4='',
friday='', schdule5='',
saturday='', schdule6='',
sunday='yes', schdule7='schdule_407', )
time.sleep(2)
        # Switch to the default frame
browser.switch_to.default_content()
get_log(browser, 管理日志)
browser.switch_to.default_content()
        # Switch to the content frame
browser.switch_to.frame("content")
loginfo = browser.find_element_by_xpath('//*[@id="namearea0"]').text
# print(loginfo)
try:
assert "配置周程表对象成功,添加内部对象 [week_schd_407]" in loginfo
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "配置周程表对象成功,添加内部对象 [week_schd_407]" in loginfo
except Exception as err:
        # If any step above raised an error, reload the device to restore its configuration
reload(hostip="10.2.2.81")
print(err)
rail_fail(test_run_id, test_id)
time.sleep(70)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c40407.py"]) | [
"[email protected]"
] | |
52f26c2206606fbefa691cc6942173f41b6ca058 | 3f2b2c885e81a15ed22b4a781bc2e8f5f264b336 | /mhs/common/mhs_common/messages/envelope.py | 647b5cfc88274a3290b87d36d4fc9f90d8920933 | [
"Apache-2.0"
] | permissive | nhsconnect/prm-deductions-integration-adaptors | 9c947dbca3c5bf22874efb35364cd22b52acd795 | 17c78a2b2df3755736500d8b10f3e09c99263ef2 | refs/heads/deductions | 2021-07-20T11:13:59.339647 | 2020-12-22T09:48:09 | 2020-12-22T09:48:09 | 246,785,891 | 0 | 2 | Apache-2.0 | 2021-04-30T21:57:17 | 2020-03-12T08:50:37 | Python | UTF-8 | Python | false | false | 2,123 | py | """This module defines the base envelope used to wrap messages to be sent to a remote MHS."""
from __future__ import annotations
import abc
import pathlib
from typing import Dict, Tuple, Any
from builder import pystache_message_builder
from definitions import ROOT_DIR
FROM_PARTY_ID = "from_party_id"
TO_PARTY_ID = "to_party_id"
CPA_ID = "cpa_id"
CONVERSATION_ID = 'conversation_id'
SERVICE = "service"
ACTION = "action"
MESSAGE_ID = 'message_id'
TIMESTAMP = 'timestamp'
TO_ASID = 'to_asid'
FROM_ASID = 'from_asid'
RECEIVED_MESSAGE_ID = "received_message_id"
MESSAGE = "hl7_message"
CONTENT_TYPE_HEADER_NAME = "Content-Type"
TEMPLATES_DIR = "data/templates"
class Envelope(abc.ABC):
"""An envelope that contains a message to be sent to a remote MHS."""
def __init__(self, template_file: str, message_dictionary: Dict[str, Any]):
"""Create a new EbxmlEnvelope that populates the specified template file with the provided dictionary.
:param template_file: The template file to populate with values.
:param message_dictionary: The dictionary of values to use when populating the template.
"""
self.message_dictionary = message_dictionary
ebxml_template_dir = str(pathlib.Path(ROOT_DIR) / TEMPLATES_DIR)
self.message_builder = pystache_message_builder.PystacheMessageBuilder(ebxml_template_dir, template_file)
@abc.abstractmethod
def serialize(self) -> Tuple[str, Dict[str, str], str]:
"""Produce a serialised representation of this message.
:return: A tuple of: the message id, headers to send along with the message and the serialized representation
of the message.
"""
pass
@classmethod
@abc.abstractmethod
def from_string(cls, headers: Dict[str, str], message: str) -> Envelope:
"""Parse the provided message string and create an instance of an Envelope.
:param headers A dictionary of headers received with the message.
:param message: The message to be parsed.
:return: An instance of an Envelope constructed from the message.
"""
pass
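# Illustrative only -- a minimal concrete subclass showing the intended
# contract; the template name, content type and builder call below are
# assumptions for demonstration, not part of the real adaptor code:
#
# class PlainEnvelope(Envelope):
#     def __init__(self, message_dictionary):
#         super().__init__("plain_template.mustache", message_dictionary)
#
#     def serialize(self):
#         message_id = self.message_dictionary[MESSAGE_ID]
#         headers = {CONTENT_TYPE_HEADER_NAME: "text/plain"}
#         return message_id, headers, str(self.message_dictionary[MESSAGE])
#
#     @classmethod
#     def from_string(cls, headers, message):
#         return cls({MESSAGE: message})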
| [
"[email protected]"
] | |
2a12c95c893661baca9bef4785d6924789ae87e7 | 50e3d53c47250bca40fbbe49ea6f5979cf3ca807 | /tson/token.py | 46875ebbe66e3600a47e3695f0654d5d388e3a25 | [] | no_license | cstuartroe/tson | 1d85749e16d611dcf653cef4adc944932450db01 | 5485e0b6480150f3535c0ce634d228876dd76ba2 | refs/heads/main | 2023-04-18T12:11:35.751657 | 2021-05-02T18:39:58 | 2021-05-02T18:39:58 | 359,987,026 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,375 | py | import re
import sys
SINGLE_CHAR_ESCAPES = {
'"': '"',
'\\': '\\',
'/': '/',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t'
}
LABEL_RE = "[a-zA-Z_][a-zA-Z0-9_]*"
NUMERIC_RE = r"-?[0-9]+(\.[0-9]+)?"
BOOLS = {
"true": True,
"false": False,
}
DECLARERS = {
"type",
"let",
}
class Token:
LABEL = "LABEL"
NUMBER = "NUMBER"
BOOL = "BOOL"
STRING = "STRING"
DECLARER = "DECLARER"
NULL = "null"
UNDEFINED = "undefined"
def __init__(self, line_no, col_no, line, s, ttype, value):
self.line_no = line_no
self.col_no = col_no
self.line = line
self.s = s
self.ttype = ttype
self.value = value
def raise_error(self, message):
print(f"Line {self.line_no}, column {self.col_no}")
print(self.line)
        print(' ' * self.col_no + '^' + '~' * (len(self.s) - 1))  # align caret with the column
print(message)
sys.exit()
def __repr__(self):
return f"Token(line_no={self.line_no+1}, col_no={self.col_no+1}, s={repr(self.s)}, ttype={repr(self.ttype)}, value={repr(self.value)})"
@staticmethod
def resolve_symbol(s):
if s in BOOLS:
return BOOLS[s], Token.BOOL
elif s in DECLARERS:
return s, Token.DECLARER
elif s in KEYWORDS:
return None, s
else:
return s, Token.LABEL
KEYWORDS = {
"let",
"type",
"import",
"export",
Token.NULL,
Token.UNDEFINED
}
class Tokenizer:
def __init__(self, lines):
self.lines = lines
def tokens(self):
self.line_no = 0
self.col_no = 0
self.pass_whitespace()
while not self.eof():
yield self.grab_token()
def eof(self):
return self.line_no == len(self.lines) - 1 and self.eol()
def eol(self):
return self.col_no == len(self.current_line())
def newline(self):
self.line_no += 1
self.col_no = 0
def current_line(self):
return self.lines[self.line_no]
def rest(self):
return self.current_line()[self.col_no:]
def next(self, i=1):
return self.current_line()[self.col_no:min(self.col_no+i, len(self.current_line()))]
def pass_whitespace(self):
while True:
if self.eof():
break
elif self.eol():
self.newline()
elif self.next() in ' \t\r\n':
self.col_no += 1
else:
break
def grab_token(self):
line_no = self.line_no
col_no = self.col_no
line = self.current_line()
label = re.match(LABEL_RE, self.rest())
number = re.match(NUMERIC_RE, self.rest())
if number:
s = number.group()
            val = float(s) if '.' in s else int(s)  # NUMERIC_RE also matches decimals
ttype = Token.NUMBER
self.col_no += len(s)
elif label:
s = label.group()
val, ttype = Token.resolve_symbol(s)
self.col_no += len(s)
# TODO: strings
else:
s = self.next()
val = None
ttype = s
self.col_no += 1
self.pass_whitespace()
return Token(
line_no=line_no,
col_no=col_no,
line=line,
s=s,
value=val,
ttype=ttype,
)
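# Sketch of the string-lexing branch flagged by the TODO in grab_token; this
# is an illustrative outline (not yet wired in), using the SINGLE_CHAR_ESCAPES
# table above. It assumes a double-quoted string with no \uXXXX escapes and
# does not report unterminated strings:
def _scan_string_body(rest):
    """Return (value, chars_consumed) for a '"'-delimited string at rest[0]."""
    out, i = [], 1
    while i < len(rest) and rest[i] != '"':
        if rest[i] == '\\' and i + 1 < len(rest) and rest[i + 1] in SINGLE_CHAR_ESCAPES:
            out.append(SINGLE_CHAR_ESCAPES[rest[i + 1]])
            i += 2
        else:
            out.append(rest[i])
            i += 1
    return ''.join(out), i + 1  # +1 consumes the closing quote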
| [
"[email protected]"
] | |
efb465ae45ed7fa2b00ca94106f0b6d33d05e6bd | 135f624cf8c2d95eff09a07397da44c8e76d1a70 | /src/tasks/migrations/0003_auto_20170206_1153.py | 8c2bbb6235854c056f563c00840d7cc4929eb512 | [] | no_license | uk-gov-mirror/datagovuk.publish_data_alpha | 42709ffdf1e3ccedf6c5c742078fda5fc9522712 | 4cbafff4311da0693d456953d01b24f27101e41f | refs/heads/master | 2021-06-17T23:05:50.835551 | 2017-05-25T15:00:27 | 2017-05-25T15:00:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-06 11:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0002_task_related_object_id'),
]
operations = [
migrations.AlterField(
model_name='task',
name='related_object_id',
field=models.CharField(blank=True, max_length=38, null=True),
),
]
| [
"[email protected]"
] | |
0a7f5bc245d7a0b1e7ff8dee61d3c6b185e1ebf3 | 139af68b78734a6bc53bd942ffa05476baf3d71d | /Python Fundamentals 2020 - 2021/13 - TEXT PROCESSING/More exercise - 13 - 05.py | 5a2252209bfb53e58143d4c6eca76fe595a218b6 | [] | no_license | MiroVatov/Python-SoftUni | 7fe3fc0a3928848c5317fb120f789c773bfc117e | 0d0d6f116281b4de8c413d254386e27d992d047b | refs/heads/main | 2023-08-24T09:44:31.261137 | 2021-10-18T14:04:03 | 2021-10-18T14:04:03 | 317,510,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | title_of_an_article = input()
print(f"<h1>")
print(title_of_an_article)
print(f"</h1>")
content_of_the_article = input()
print(f"<article>")
print(content_of_the_article)
print(f"</article>")
while True:
comment = input()
if comment == "end of comments":
break
print(f"<div>")
print(comment)
print(f"</div>") | [
"[email protected]"
] | |
715b78f5c54afb7db2f05ce2265f7dd90eed0a8d | 76a68cbbaf9c4aa15ec9b59455e33c6784229bdb | /MECS_gym/venv/bin/easy_install-3.5 | 82fa72283113e10abd217602c94e6c52cf7de730 | [
"MIT"
] | permissive | Python-Repository-Hub/KAIST_MEC_simulator | e63f704bd20b36fd1a8ffa30b7e736c50351d1cc | 26556a8216e2bbdfa5c5ee377e9deb51ea7602f8 | refs/heads/master | 2023-08-30T06:57:39.003766 | 2021-11-18T16:16:48 | 2021-11-18T16:16:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | 5 | #!/home/wisrl/Downloads/baselines-master_final_ppo2/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.5')()
)
| [
"[email protected]"
] | |
63672e5230782b6ef6729b1836332595ccc3ecfd | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Generators/Pythia_i/share/PYTUNE_pprintout.py | 564fee7303a86b2b89df12767930b5600ed7390c | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | #______________________________________________________________________________________________________________________
# author: liza.mijovic@_nospam_cern.ch
#
# jO effect: dump the values of all parameters PYTUNE can set - these jobOptions are meant for running with GET_TUNE_params.sh
#
# ______________________________________________________________________________________________________________________
from AthenaCommon.AlgSequence import AlgSequence
topAlg = AlgSequence("TopAlg")
from Pythia_i.Pythia_iConf import Pythia
topAlg += Pythia()
Pythia = topAlg.Pythia
theApp.EvtMax = 0
Pythia.Tune_Name="ATLAS_-1"
Pythia.Direct_call_to_pytune=REPIND
# if one wants the call to be equivalent to Pythia.Tune_Name="PYTUNE_XXX"
# the ATLAS stable particles convention should also be added
Pythia.PygiveCommand += [ "mstj(22)=2" ]
PYDAT1_PARAMS=[ "MSTU", "PARU", "MSTJ", "PARJ" ]
PYPARS_PARAMS=[ "MSTP", "PARP", "MSTI", "PARI" ]
PYTUNE_PARAMS=PYDAT1_PARAMS+PYPARS_PARAMS
PQ_LIST=[]
for i in PYTUNE_PARAMS:
PQ_LIST+=[i+"("+repr(x)+")=" for x in range(1,201)]
Pythia.PygiveCommand += PQ_LIST
Pythia.Param_Query_AfterInit += PQ_LIST
| [
"[email protected]"
] | |
9c47159651e9c6fdafe463538fc91b52c74619cd | 3f09e77f169780968eb4bd5dc24b6927ed87dfa2 | /src/Problems/Unique_Paths_II.py | 99924d8912f90f3e4316559a40514a8aff937e3d | [] | no_license | zouyuanrenren/Leetcode | ad921836256c31e31cf079cf8e671a8f865c0660 | 188b104b81e6c73792f7c803c0fa025f9413a484 | refs/heads/master | 2020-12-24T16:59:12.464615 | 2015-01-19T21:59:15 | 2015-01-19T21:59:15 | 26,719,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,660 | py | '''
Created on 22 Nov 2014
@author: zouyuanrenren
'''
'''
Follow up for "Unique Paths":
Now consider if some obstacles are added to the grids. How many unique paths would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
For example,
There is one obstacle in the middle of a 3x3 grid as illustrated below.
[
[0,0,0],
[0,1,0],
[0,0,0]
]
The total number of unique paths is 2.
Note: m and n will be at most 100.
'''
'''
The basic idea is still dynamic programming, similar to Unique_Path.
The only difference is to consider the obstacle, the unique path to a obstacle is 0
'''
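# Worked example for the 3x3 grid above -- the DP table of path counts is
#   1 1 1
#   1 0 1
#   1 1 2
# (the obstacle cell contributes 0), so the answer is matrix[2][2] == 2.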
class Solution:
# @param obstacleGrid, a list of lists of integers
# @return an integer
def uniquePathsWithObstacles(self, obstacleGrid):
matrix = []
row = len(obstacleGrid)
col = len(obstacleGrid[0])
if obstacleGrid[0][0] != 1:
matrix.append([1])
else:
return 0
for i in range(1, col):
if obstacleGrid[0][i] == 0:
matrix[0].append(matrix[0][i-1])
else:
matrix[0].append(0)
for i in range(1, row):
if obstacleGrid[i][0] == 0:
matrix.append([matrix[i-1][0]])
else:
matrix.append([0])
for j in range(1, col):
if obstacleGrid[i][j] == 0:
matrix[i].append(matrix[i-1][j]+matrix[i][j-1])
else:
matrix[i].append(0)
return matrix[row-1][col-1]
matrix = [
[0,0,0],
[0,1,0],
[0,0,0]
]
print(Solution().uniquePathsWithObstacles(matrix))
| [
"[email protected]"
] | |
650ae2260d45097b8160c5a8e332d7be6a280eb9 | 33f1c49920201e21adaf794c826148d0330db4a1 | /python/binary search/141_sqrt_x.py | 688a4f77400111f7206db6c0513756dfcec4b3c1 | [] | no_license | zsmountain/lintcode | 18767289566ccef84f9b32fbf50f16b2a4bf3b21 | 09e53dbcf3b3dc2b51dfb343bf77799632efd219 | refs/heads/master | 2020-04-04T21:35:07.740575 | 2019-03-16T20:43:31 | 2019-03-16T20:43:31 | 156,291,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | '''
Implement int sqrt(int x).
Compute and return the square root of x.
Have you met this question in a real interview?
Example
sqrt(3) = 1
sqrt(4) = 2
sqrt(5) = 2
sqrt(10) = 3
'''
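# Binary search on the answer: the loop keeps start*start < x < end*end, so
# when start and end become adjacent, start is the floor square root (the
# trailing end*end < x check is a defensive fallback).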
class Solution:
"""
@param x: An integer
@return: The sqrt of x
"""
def sqrt(self, x):
# write your code here
if x < 0:
raise Exception('Invalid Input!')
if x < 2:
return x
start, end = 1, x
while start + 1 < end:
mid = (start + end) // 2
if mid * mid < x:
start = mid
elif mid * mid > x:
end = mid
else:
return mid
if end * end < x:
return end
else:
return start
s = Solution()
print(s.sqrt(2147483647))
print(s.sqrt(3))
print(s.sqrt(4))
print(s.sqrt(5))
print(s.sqrt(10))
| [
"[email protected]"
] | |
3b23e16e50ca0f47fef9667521ed3075d96d58e2 | 1f6212fab177fb8b84258f297928f5d6b97908d4 | /apps/CMDB/model/idc_models.py | 6bce6d2afca8721001a890b7cae21920a613b25d | [] | no_license | logan0709/roe | 9773ca058e017648bc9a9c05abf1597268a9759a | 3f87cfb08471f0e307c08fe0d5de064b4ea8e35b | refs/heads/master | 2020-04-08T22:00:51.851184 | 2018-11-29T10:37:30 | 2018-11-29T10:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,986 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
IDCType = (
('DX', u'电信'),
('LT', u'联通'),
('YD', u'移动'),
('ZJ', u'自建'),
('BGP', u'BGP')
)
# IDC data center information
class Idc(models.Model):
name = models.CharField(u"机房名称", max_length=30, null=True)
type = models.CharField(choices=IDCType, max_length=20, verbose_name=u'机房类型', default='BGP')
address = models.CharField(u"机房地址", max_length=100, null=True,blank=True)
tel = models.CharField(u"机房电话", max_length=30, null=True,blank=True)
contact = models.CharField(u"客户经理", max_length=30, null=True,blank=True)
contact_phone = models.CharField(u"移动电话", max_length=30, null=True,blank=True)
jigui = models.CharField(u"机柜信息", max_length=30, null=True,blank=True)
ip_range = models.CharField(u"IP范围", max_length=30, null=True,blank=True)
bandwidth = models.CharField(u"接入带宽", max_length=30, null=True,blank=True)
start_date = models.DateField(null=True, blank=True, verbose_name=u'租赁日期')
end_date = models.DateField(null=True, blank=True, verbose_name=u'到期日期')
cost = models.CharField(blank=True, max_length=20, verbose_name=u'租赁费用')
def __unicode__(self):
return self.name
class Meta:
db_table=u'IDC'
verbose_name = u'IDC'
verbose_name_plural = verbose_name
class Zone_Assets(models.Model):
zone_name = models.CharField(max_length=100, unique=True)
zone_contact = models.CharField(max_length=100, blank=True, null=True, verbose_name='机房联系人')
zone_number = models.CharField(max_length=100, blank=True, null=True, verbose_name='联系人号码')
zone_network = models.CharField(max_length=100, blank=True, null=True, verbose_name='机房网段')
    '''Custom permissions'''
class Meta:
db_table = 'opsmanage_zone_assets'
permissions = (
("can_read_zone_assets", "读取机房资产权限"),
("can_change_zone_assets", "更改机房资产权限"),
("can_add_zone_assets", "添加机房资产权限"),
("can_delete_zone_assets", "删除机房资产权限"),
)
verbose_name = '机房资产表'
verbose_name_plural = '机房资产表'
class Line_Assets(models.Model):
line_name = models.CharField(max_length=100, unique=True)
    '''Custom permissions'''
class Meta:
db_table = 'opsmanage_line_assets'
permissions = (
("can_read_line_assets", "读取出口线路资产权限"),
("can_change_line_assetss", "更改出口线路资产权限"),
("can_add_line_assets", "添加出口线路资产权限"),
("can_delete_line_assets", "删除出口线路资产权限"),
)
verbose_name = '出口线路资产表'
verbose_name_plural = '出口线路资产表'
| [
"[email protected]"
] | |
e577ecad7dd739f19d5d4c54911877c6f273475d | cc128e9804ce0cb659421d2b7c98ff4bfbb9d90b | /train_mnist.py | 1b0d31c2e9272ae7e592fb1a42f457db483d3fe7 | [] | no_license | hope-yao/robust_attention | 6beb2de2c3b849c66e79ec71ae81ed127cee3079 | 905a32f02bb8d4709666036f6a6e1f82684f8716 | refs/heads/master | 2020-04-02T08:52:48.430423 | 2018-10-30T00:13:55 | 2018-10-30T00:13:55 | 154,265,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,507 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from model_mnist import Model_madry, Model_att, Model_crop
from pgd_attack import *
from viz2 import *
import os
import numpy as np
from pgd_attack import LinfPGDAttack
from utils import creat_dir
from tqdm import tqdm
slim = tf.contrib.slim
def main(cfg):
img_size = cfg['img_size']
batch_size = cfg['batch_size']
num_glimpse = cfg['num_glimpse']
glimpse_size = cfg['glimpse_size']
lr = cfg['lr']
input_images = tf.placeholder(tf.float32,shape=(batch_size, img_size, img_size, 1))
input_label = tf.placeholder(tf.int64,shape=(batch_size))
# build classifier
#model = Model_att(input_images, input_label, glimpse_size, num_glimpse)
# model = Model_madry(input_images, input_label)
model = Model_crop(input_images, input_label)
# setup attacker
attack = LinfPGDAttack(model, epsilon=0.3, k=40, a=0.01, random_start=True, loss_func='xent')
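    # PGD hyper-parameters: epsilon is the L-infinity perturbation budget, k
    # the number of gradient steps, a the per-step size -- the 0.3/40/0.01
    # combination matches the well-known Madry et al. MNIST setting.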
## OPTIMIZER ##
learning_rate = tf.Variable(lr) # learning rate for optimizer
optimizer=tf.train.AdamOptimizer(learning_rate, beta1=0.5)
grads=optimizer.compute_gradients(model.xent)
train_op=optimizer.apply_gradients(grads)
saver = tf.train.Saver()
## training starts ###
FLAGS = tf.app.flags.FLAGS
tfconfig = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=True,
)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
init = tf.global_variables_initializer()
sess.run(init)
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
hist = {'train_acc': [],
'train_adv_acc': [],
'test_acc': [],
'test_adv_acc': [],
'train_loss': [],
'test_loss': [],
'train_adv_loss': [],
'test_adv_loss': []}
train_iters=500000
for itr in tqdm(range(train_iters)):
x_batch_train, y_batch_train = mnist.train.next_batch(batch_size)
x_batch_train_adv = attack.perturb(x_batch_train.reshape(batch_size, img_size, img_size, 1), y_batch_train, sess)
adv_dict_train = {input_images: x_batch_train_adv.reshape(batch_size, img_size, img_size, 1),
input_label: y_batch_train}
nat_dict_train = {input_images: x_batch_train.reshape(batch_size, img_size, img_size, 1),
input_label: y_batch_train}
sess.run(train_op, feed_dict=adv_dict_train)
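        # Note: the weight update above uses only adversarially perturbed
        # batches (plain adversarial training); clean batches are kept for the
        # accuracy/loss logging below.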
if itr % 100 == 0:
y_pred, train_loss_i = sess.run([model.y_pred, model.xent], feed_dict=nat_dict_train)
counts = np.asarray([np.argmax(np.bincount(y_pred[:,i])) for i in range(batch_size)])
train_acc_i = np.mean(counts == nat_dict_train[input_label])
x_batch_test, y_batch_test = mnist.test.next_batch(batch_size)
nat_dict_test = {input_images: x_batch_test.reshape(batch_size, img_size, img_size, 1),
input_label: y_batch_test}
y_pred, test_loss_i = sess.run([model.y_pred, model.xent], feed_dict=nat_dict_test)
counts = np.asarray([np.argmax(np.bincount(y_pred[:,i])) for i in range(batch_size)])
test_acc_i = np.mean(counts == nat_dict_test[input_label])
print("iter: {}, train_acc:{} test_acc:{} train_loss:{} test_loss:{} "
.format(itr, train_acc_i, test_acc_i, train_loss_i, test_loss_i))
x_batch_train_adv = attack.perturb(x_batch_train.reshape(batch_size, img_size, img_size, 1), y_batch_train, sess)
adv_dict_train = {input_images: x_batch_train_adv.reshape(batch_size, img_size, img_size, 1),
input_label: y_batch_train}
y_pred, train_adv_loss_i = sess.run([model.y_pred, model.xent], feed_dict=adv_dict_train)
counts = np.asarray([np.argmax(np.bincount(y_pred[:,i])) for i in range(batch_size)])
train_adv_acc_i = np.mean(counts == adv_dict_train[input_label])
x_batch_test_adv = attack.perturb(x_batch_test.reshape(batch_size, img_size, img_size, 1), y_batch_test, sess)
adv_dict_test = {input_images: x_batch_test_adv.reshape(batch_size, img_size, img_size, 1),
input_label: y_batch_test}
y_pred, test_adv_loss_i = sess.run([model.y_pred, model.xent], feed_dict=adv_dict_test)
counts = np.asarray([np.argmax(np.bincount(y_pred[:,i])) for i in range(batch_size)])
test_adv_acc_i = np.mean(counts == adv_dict_test[input_label])
print("iter: {}, train_adv_acc:{} test_adv_acc:{} train_adv_loss:{} test_adv_loss:{} "
.format(itr, train_adv_acc_i, test_adv_acc_i, train_adv_loss_i, test_adv_loss_i))
hist['train_acc'] += [train_acc_i]
hist['train_adv_acc'] += [train_adv_acc_i]
hist['test_acc'] += [test_acc_i]
hist['test_adv_acc'] += [test_adv_acc_i]
hist['train_loss'] += [train_loss_i]
hist['test_loss'] += [test_loss_i]
hist['train_adv_loss'] += [train_adv_loss_i]
hist['test_adv_loss'] += [test_adv_loss_i]
np.save('hist',hist)
saver.save(sess,'crop_ckpt')
print('done')
if __name__ == "__main__":
cfg = {'batch_size': 32,
'img_dim': 2,
'img_size': 28,
'num_glimpse': 5,
'glimpse_size': 20,
'lr': 1e-4
}
main(cfg)
| [
"[email protected]"
] | |
31293892611c10cb6f78d5d022590cb0fd1f5d9c | 56ade096db1fe376ee43d38c96b43651ee07f217 | /326. Power of Three/Python/Solution.py | c065d49b669d5e5476e90c9150b003991cc8f091 | [] | no_license | xiaole0310/leetcode | c08649c3f9a9b04579635ee7e768fe3378c04900 | 7a501cf84cfa46b677d9c9fced18deacb61de0e8 | refs/heads/master | 2020-03-17T05:46:41.102580 | 2018-04-20T13:05:32 | 2018-04-20T13:05:32 | 133,328,416 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | class Solution(object):
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
if n <= 0:
return False
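        # 3**19 = 1162261467 is the largest power of 3 inside a signed 32-bit
        # int; since 3 is prime, n divides it exactly when n is a power of 3.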
return (3 ** 19) % n == 0
| [
"[email protected]"
] | |
c321ff81f92bb3395f0e4dba6fc0c8587be08646 | b1cdbb08b51ce0edfc97351ea07f10166624e5cc | /src/rocks-pylib/rocks/commands/dump/appliance/attr/__init__.py | 6c6ac6efa9a4ddd893b4f02fb1a4175d1e36d338 | [] | no_license | tcooper/core | 2453220bdc305a4532fd6f0bda9fdb22560add21 | 61d2512146f34f71d09f817a3d07a56c979d1bf9 | refs/heads/master | 2021-08-22T17:17:20.080481 | 2017-11-30T19:33:53 | 2017-11-30T19:33:53 | 105,914,127 | 0 | 0 | null | 2017-10-05T16:32:48 | 2017-10-05T16:32:48 | null | UTF-8 | Python | false | false | 4,555 | py | # $Id: __init__.py,v 1.9 2012/11/27 00:48:14 phil Exp $
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 6.2 (SideWindwer)
# version 7.0 (Manzanita)
#
# Copyright (c) 2000 - 2017 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:[email protected]
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: __init__.py,v $
# Revision 1.9 2012/11/27 00:48:14 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.8 2012/05/06 05:48:23 phil
# Copyright Storm for Mamba
#
# Revision 1.7 2011/07/23 02:30:28 phil
# Viper Copyright
#
# Revision 1.6 2010/09/15 18:45:23 bruno
# don't yak if an attribute doesn't have a value. and if an attribute doesn't
# have a value, then don't dump it.
#
# Revision 1.5 2010/09/07 23:52:52 bruno
# star power for gb
#
# Revision 1.4 2009/11/18 23:34:49 bruno
# cleanup help section
#
# Revision 1.3 2009/06/19 21:07:28 mjk
# - added dumpHostname to dump commands (use localhost for frontend)
# - added add commands for attrs
# - dump uses add for attr (does not overwrite installer set attrs)A
# - do not dump public or private interfaces for the frontend
# - do not dump os/arch host attributes
# - fix various self.about() -> self.abort()
#
# Revision 1.2 2009/05/01 19:06:56 mjk
# chimi con queso
#
# Revision 1.1 2008/12/23 00:14:05 mjk
# - moved build and eval of cond strings into cond.py
# - added dump appliance,host attrs (and plugins)
# - cond values are typed (bool, int, float, string)
# - everything works for client nodes
# - random 80 col fixes in code (and CVS logs)
#
import sys
import socket
import rocks.commands
import string
class Command(rocks.commands.dump.appliance.command):
"""
Dump the set of attributes for appliances.
<arg optional='1' type='string' name='appliance'>
Name of appliance
</arg>
<example cmd='dump appliance attr compute'>
List the attributes for compute appliances
</example>
"""
def run(self, params, args):
for appliance in self.newdb.getApplianceNames(args):
for attr in self.newdb.getCategoryAttrs('appliance', appliance.name):
v = self.quote(attr.value)
if v:
self.dump('add appliance attr %s %s %s'
% (appliance.name, attr.attr, v))
| [
"[email protected]"
] | |
89154813eb3ff9f4e9c50e0d31da866ddf5a105f | 1617a9a9c92146bcdac89b5efb1ef0d18408160b | /cont6lab/36/generator.py | 0977a7c5fc92dad574ae123ad1808f554c15b4d0 | [] | no_license | LitRidl/checker-content | 1b1329b4462b87731e0755ab33480ff063a94a00 | b5d0456c8d4d28db6e6022e272a95a385f253797 | refs/heads/master | 2023-08-17T18:08:07.377680 | 2018-02-04T11:16:34 | 2018-02-04T11:16:34 | 120,077,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,056 | py | from __future__ import print_function
from random import shuffle, randint, choice
from numpy import base_repr
import sys
HEX_DIGITS = '0123456789ABCDEF'
def rnd(l, r, base=10):
return base_repr(randint(l, r), base=base)
def rnd_word(length, prefix=' ', alphabet=HEX_DIGITS):
word = ''.join(choice(alphabet) for _ in range(length))
word = word.lstrip('0') or '0'
return (choice(prefix) + word).lstrip(' ') or '0'
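# e.g. rnd_word(4) might yield '7A3F'; leading zeros are stripped, so a result
# never starts with '0' unless it is exactly '0'.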
def generate_test1(params):
a = rnd_word(randint(*params['word_len'][0]),
alphabet=HEX_DIGITS[:params['word_radix'][0]],
prefix=params['word_prefix'][0])
if not params['leading_zero']:
a = a.lstrip('0')
return '{0}'.format(a)
def generate_test2(params):
l1 = randint(*params['word_len'][0])
l2 = l1 # randint(*params['word_len'][1])
a = rnd_word(l1, alphabet=HEX_DIGITS[:params['word_radix'][0]],
prefix=params['word_prefix'][0])
b = rnd_word(l2, alphabet=HEX_DIGITS[:params['word_radix'][1]],
prefix=params['word_prefix'][0])
if not params['leading_zero']:
a, b = a.lstrip('0'), b.lstrip('0')
return '{0} {1}'.format(a, b)
def generate_test2_par(params):
while True:
_a = randint(*(params['word_len'][0]))
_b = randint(*(params['word_len'][1]))
if _a > _b:
break
a = _a
b = _b
return '{0} {1}'.format(a, b)
def seq_tests1(var1_bounds, var2_bounds, base=10):
test_idx = 2
for a in range(*var1_bounds):
with open('tests/{0:03d}.dat'.format(test_idx), 'w') as f:
res = '{0}'.format(base_repr(a, base))
f.write(res)
test_idx += 1
return test_idx
def seq_tests2(var1_bounds, var2_bounds, base=10):
test_idx = 2
for a in range(*var1_bounds):
for b in range(*var2_bounds):
with open('tests/{0:03d}.dat'.format(test_idx), 'w') as f:
res = '{0} {1}'.format(base_repr(a, base), base_repr(b, base))
f.write(res)
test_idx += 1
return test_idx
def rnd_tests(test_first=66, nums=2, tests=40, base=10):
params = {
'word_len': [(8, 12), (8, 12)],
'word_radix': [base, base],
'word_prefix': [' ', ' '], # can be ' +-', for example
'leading_zero': True
}
for test_idx in range(test_first, test_first + tests):
with open('tests/{0:03d}.dat'.format(test_idx), 'w') as f:
if nums == 1:
f.write(generate_test1(params))
elif nums == 2:
f.write(generate_test2(params))
NUMS = 1
BASE = 2
TESTS = 100 # 40
if __name__ == '__main__':
test_idx = 42 # SPECIFY!!!
if 's' in sys.argv[1]:
if NUMS == 1:
test_idx = seq_tests1([0, 40], [0, 0], base=BASE)
if NUMS == 2:
test_idx = seq_tests2([0, 8], [0, 8], base=BASE)
print('Seq tests until {0}'.format(test_idx))
if 'r' in sys.argv[1]:
rnd_tests(test_first=test_idx, nums=NUMS, tests=TESTS, base=BASE)
| [
"[email protected]"
] | |
732198405c718e475715d90fdb730cc8b135948c | dbd87fe6e9466c4cada18b037667cfdddc62c193 | /data/FX_CFD/XTB/utils.py | 78e5d6fefed7858147792c12c920d9589d12f8b3 | [] | no_license | alexanu/Python_Trading_Snippets | 74515a40dc63ba50d95bd50330ed05d59b5dc837 | 85969e681b9c74e24e60cc524a952f9585ea9ce9 | refs/heads/main | 2023-06-25T03:27:45.813987 | 2023-06-09T16:09:43 | 2023-06-09T16:09:43 | 197,401,560 | 18 | 17 | null | 2023-02-08T22:25:25 | 2019-07-17T14:05:32 | Jupyter Notebook | UTF-8 | Python | false | false | 481 | py | from itertools import product
import settings
from api import data_getter
from writer import write_tsv
def file_maker():
combis = product(settings.WATCHLIST_MAP, settings.INDICATORS)
for i in combis:
s = i[0].split(",")[0]
ind = i[1].split(",")
data = data_getter(symbol=s, indicator=ind[0], period=ind[1])
write_tsv(data=data[settings.TIMEFRAME], symbol=s, indicator=ind[0], period=ind[1])
if __name__ == '__main__':
file_maker()
| [
"[email protected]"
] | |
791450cd265e4fdf114e22cd378fa7697db6ffcf | 64ae9e59e387aa219183f6748f07ede3533c14b2 | /lib/auxiliary/switchd/workertask.py | 3f4ddb643005a336a65bf135c9c26feece16dbf4 | [
"BSD-2-Clause"
] | permissive | sohonet/HEN | fe0168816d908c9c5d3180e90e67b12e4724c7be | 47575028a6f3d3fe04d6839dd779b2b1b991accc | refs/heads/master | 2021-01-19T07:30:22.849260 | 2012-02-20T19:41:17 | 2012-02-20T19:41:17 | 87,548,153 | 0 | 0 | null | 2017-04-07T13:25:40 | 2017-04-07T13:25:40 | null | UTF-8 | Python | false | false | 1,214 | py | class TaskID:
checkSensorTask = 0
hostStatusTask = 1
switchReadFdbTask = 2
linkStatusTask = 3
networkMapGenerationTask = 4
class WorkerTask:
nodeinstance = None
node = None
taskid = None
timeScheduled = None
def __init__(self, node, nodeinstance, taskid, timeScheduled):
self.nodeinstance = nodeinstance
self.node = node
self.taskid = taskid
self.timeScheduled = timeScheduled
def __del__(self):
del self.nodeinstance
del self.node
del self.taskid
del self.timeScheduled
def __str__(self):
s = "WorkerTask "
if self.taskid == TaskID.checkSensorTask:
s += "checkSensorTask "
elif self.taskid == TaskID.hostStatusTask:
s += "hostStatusTask "
elif self.taskid == TaskID.switchReadFdbTask:
s += "switchReadFdbTask "
elif self.taskid == TaskID.linkStatusTask:
s += "linkStatusTask "
elif self.taskid == TaskID.networkMapGenerationTask:
s += "networkMapGenerationTask "
try:
s += str(self.node.getNodeID())
except:
s += str(self.node)
return s
| [
"[email protected]"
] | |
02916aa12e26d8e7bd5c64c4e962ad2a3ddd86e0 | 63a1671145dc6dc6e1a9d10ec21c520b83036fea | /others/label_convert/show_img_by_yolo.py | 9a438d83718bba72dd0beed8655d3fa9949b61fa | [
"MIT"
] | permissive | chenpaopao/- | 4eca1405a3aab86fe649817048852b620a962c1a | 320f7d9a0b9e49528561024217ba07645eb68805 | refs/heads/master | 2023-02-04T17:44:48.639136 | 2022-06-06T05:04:57 | 2022-06-06T05:04:57 | 323,789,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,590 | py | import argparse
import os
import sys
from collections import defaultdict
import cv2
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from tqdm import tqdm
category_set = dict()
image_set = set()
every_class_num = defaultdict(int)
category_item_id = -1
def xywhn2xyxy(box, size):
box = list(map(float, box))
size = list(map(float, size))
xmin = (box[0] - box[2] / 2.) * size[0]
ymin = (box[1] - box[3] / 2.) * size[1]
xmax = (box[0] + box[2] / 2.) * size[0]
ymax = (box[1] + box[3] / 2.) * size[1]
return (xmin, ymin, xmax, ymax)
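# xywhn2xyxy converts YOLO's normalized (cx, cy, w, h) into absolute pixel
# corners; e.g. box (0.5, 0.5, 0.5, 0.5) on a 100x100 image -> (25.0, 25.0, 75.0, 75.0).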
def addCatItem(name):
global category_item_id
category_item = dict()
category_item_id += 1
category_item['id'] = category_item_id
category_item['name'] = name
category_set[name] = category_item_id
return category_item_id
def draw_box(img, objects, draw=True):
for object in objects:
category_name = object[0]
every_class_num[category_name] += 1
if category_name not in category_set:
category_id = addCatItem(category_name)
else:
category_id = category_set[category_name]
xmin = int(object[1][0])
ymin = int(object[1][1])
xmax = int(object[1][2])
ymax = int(object[1][3])
if draw:
def hex2rgb(h): # rgb order (PIL)
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
'2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
palette = [hex2rgb('#' + c) for c in hex]
n = len(palette)
c = palette[int(category_id) % n]
bgr = False
color = (c[2], c[1], c[0]) if bgr else c
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color)
cv2.putText(img, category_name, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 1, color)
return img
def show_image(image_path, anno_path, show=False, plot_image=False):
assert os.path.exists(image_path), "image path:{} dose not exists".format(image_path)
assert os.path.exists(anno_path), "annotation path:{} does not exists".format(anno_path)
anno_file_list = [os.path.join(anno_path, file) for file in os.listdir(anno_path) if file.endswith(".txt")]
with open(anno_path + "/classes.txt", 'r') as f:
classes = f.readlines()
category_id = dict((k, v.strip()) for k, v in enumerate(classes))
for txt_file in tqdm(anno_file_list):
if not txt_file.endswith('.txt') or 'classes' in txt_file:
continue
filename = txt_file.split(os.sep)[-1][:-3] + "jpg"
image_set.add(filename)
file_path = os.path.join(image_path, filename)
if not os.path.exists(file_path):
continue
img = cv2.imread(file_path)
if img is None:
continue
width = img.shape[1]
height = img.shape[0]
objects = []
with open(txt_file, 'r') as fid:
for line in fid.readlines():
line = line.strip().split()
category_name = category_id[int(line[0])]
bbox = xywhn2xyxy((line[1], line[2], line[3], line[4]), (width, height))
obj = [category_name, bbox]
objects.append(obj)
img = draw_box(img, objects, show)
if show:
cv2.imshow(filename, img)
cv2.waitKey()
cv2.destroyAllWindows()
if plot_image:
        # bar chart of instance counts per class
plt.bar(range(len(every_class_num)), every_class_num.values(), align='center')
        # replace the x ticks 0,1,2,... with the corresponding class names
plt.xticks(range(len(every_class_num)), every_class_num.keys(), rotation=90)
        # annotate each bar with its count
for index, (i, v) in enumerate(every_class_num.items()):
plt.text(x=index, y=v, s=str(v), ha='center')
        # x-axis label
plt.xlabel('image class')
        # y-axis label
plt.ylabel('number of images')
        # chart title
plt.title('class distribution')
plt.savefig("class_distribution.png")
plt.show()
if __name__ == '__main__':
"""
脚本说明:
该脚本用于yolo标注格式(.txt)的标注框可视化
参数明说:
image_path:图片数据路径
anno_path:txt标注文件路径
show:是否展示标注后的图片
plot_image:是否对每一类进行统计,并且保存图片
"""
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--image-path', type=str, default='./data/images', help='image path')
parser.add_argument('-ap', '--anno-path', type=str, default='./data/labels/yolo', help='annotation path')
    parser.add_argument('-s', '--show', action='store_true', help='whether to show images')
parser.add_argument('-p', '--plot-image', action='store_true')
opt = parser.parse_args()
if len(sys.argv) > 1:
print(opt)
show_image(opt.image_path, opt.anno_path, opt.show, opt.plot_image)
else:
image_path = './data/images'
anno_path = './data/labels/yolo'
show_image(image_path, anno_path, show=True, plot_image=True)
print(every_class_num)
print("category nums: {}".format(len(category_set)))
print("image nums: {}".format(len(image_set)))
print("bbox nums: {}".format(sum(every_class_num.values())))
| [
"[email protected]"
] | |
1e9b873b207de4e3ceafa838dad971dbb1499a7f | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit365.py | eb19e320752840e2c905c3f8005c7be2d19bdece | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,388 | py | # qubit number=3
# total number=73
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
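# e.g. bitwise_dot('101', '111') == '0' since 1*1 + 0*1 + 1*1 = 2 = 0 (mod 2);
# note bitwise_xor returns its result reversed (a little-endian convention).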
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
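    # for f(x) = a.x XOR b, evaluating at x = 0...0 recovers the constant b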
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=59
prog.cz(input_qubit[0],input_qubit[2]) # number=60
prog.h(input_qubit[2]) # number=61
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.cx(input_qubit[0],input_qubit[2]) # number=54
prog.cx(input_qubit[0],input_qubit[2]) # number=70
prog.x(input_qubit[2]) # number=71
prog.cx(input_qubit[0],input_qubit[2]) # number=72
prog.h(input_qubit[2]) # number=67
prog.cz(input_qubit[0],input_qubit[2]) # number=68
prog.h(input_qubit[2]) # number=69
prog.h(input_qubit[2]) # number=64
prog.cz(input_qubit[0],input_qubit[2]) # number=65
prog.h(input_qubit[2]) # number=66
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.rx(2.3310617489636263,input_qubit[2]) # number=58
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.x(input_qubit[1]) # number=62
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.rx(-0.9173450548482197,input_qubit[1]) # number=57
prog.cx(input_qubit[2],input_qubit[1]) # number=63
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
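    # the most frequent measurement outcome (bit-reversed) is the recovered a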
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit365.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"[email protected]"
] | |
5cd7bf15cbb9f9a315ace1403ce4ea46bc9b95db | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2345/60586/315364.py | 08640613734e1bd7c00a353bb3777990539f833a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | input()
input()
x=input()
input()
y=input()
if x=="2 2"and y=="1 2 3":
print("2 1")
print("0 0")
elif x=="2 2"and y=="1 3 3":
print("2 1")
print("3 2")
else:
print(x)
print(y) | [
"[email protected]"
] | |
92798e97c2e24cf01a2aa823f42f660aaab8293b | 08892167da611ae1d9fa36ac2c1232804da9d487 | /build/ur_dashboard_msgs/catkin_generated/pkg.develspace.context.pc.py | be83fb6cc713cfdfe3c18a4b00a74ad547de0f19 | [] | no_license | QuiN-cy/ROS_Test_ws | cfd11134312590cabe09f404a332e5f1c4415f59 | 6b041e3aa8c27212c5fc665d3c54dbde988b5d67 | refs/heads/main | 2023-05-15T06:19:16.680149 | 2021-06-11T15:34:05 | 2021-06-11T15:34:05 | 371,643,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/student/test_ws/devel/.private/ur_dashboard_msgs/include".split(';') if "/home/student/test_ws/devel/.private/ur_dashboard_msgs/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur_dashboard_msgs"
PROJECT_SPACE_DIR = "/home/student/test_ws/devel/.private/ur_dashboard_msgs"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
2e3d8d7cfcfa60a44a0addf88f52ea50fef537c3 | cb63b64435af17aaa652d7efd9f624e0e9385085 | /todo/forms.py | af63ce00a410f0b86a199cb882b0d209adfb2464 | [] | no_license | Vostbur/todo-multiuser-site | e4bb6bb0713cb466fa5338c213911f3d53089db2 | b6e975255cbd74ce2319e64b885558d244454b43 | refs/heads/master | 2023-05-04T23:36:50.668891 | 2021-05-24T19:47:36 | 2021-05-24T19:47:36 | 370,466,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.forms import ModelForm
from .models import Todo
class TodoForm(ModelForm):
class Meta:
model = Todo
fields = ['title', 'description', 'important']
| [
"[email protected]"
] | |
e57a5bd566d8bd5e6aa1057bfd026b0c36f65ea1 | 33d078ea9a4dd549d99b01a3aff09d1f2032d6eb | /test/test_server.py | 926c6bae3aa8e125201d8c062760d2678483ddf8 | [] | no_license | labinxu/majong | f84fa34ce4ba82f5006f37f0ddc98b8c08445d10 | 234a82b11093f475d5fc4ea37d2b77a3d33877be | refs/heads/master | 2020-03-19T04:18:39.950419 | 2018-06-02T12:16:38 | 2018-06-02T12:16:38 | 135,814,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | import sys
if '..' not in sys.path:
sys.path.append('..')
import socket
from xserver import XServer
from player import Player
from protos.action_pb2 import Action
class Test_server(XServer):
    def __init__(self, host, ip, players):
        # forward the connection settings and player count to the base server
super().__init__(host, ip, players)
    def initcards(self):
        """Use a fixed, deterministic deck for testing."""
        self.cards = [1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 7, 6,
                      1, 7, 2, 3, 9, 5, 6, 7, 8, 9, 7, 3]
    def __listening(self):
        """Accept player connections and hand each one an INIT action."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.ip, self.port))
sock.listen(self.player_number)
self.sock = sock
self.logger.info('starting the server %s %s' % (self.ip, self.port))
i = 1
while True:
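            # accept each client, wrap it in a Player with an incrementing id,
            # and push an ACT_INIT action before starting its thread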
s, addr = sock.accept()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
player = Player(s, addr, i, self)
action = Action()
action.id = i
action.direct = 0
#init
action.action_type = Action.ACT_INIT
action.message = 'init player'
player.send_action(action)
self.players.append(player)
player.start()
i += 1
if __name__ == "__main__":
xserver = Test_server('127.0.0.1', 20000, 1)
xserver.start()
| [
"[email protected]"
] | |
c006e72d485742e0e24ae62bfbfb57f79139c0dd | 2ce0c97025f0cc644bcb747e1a7bc41b15890618 | /src/powerful_benchmarker/ensembles/concatenate_embeddings.py | a6360ad6b300eeb1d7808ecc03f99a0a4389d704 | [] | no_license | kwotsin/powerful-benchmarker | 5412c9b42be385b0b525e9ae9893ba9d12bd21eb | b9cf4dd1ed1e21bfe10b9a88972e51f0db9a0545 | refs/heads/master | 2022-11-22T10:03:34.346800 | 2020-07-28T04:31:27 | 2020-07-28T04:31:27 | 285,510,355 | 2 | 0 | null | 2020-08-06T08:00:03 | 2020-08-06T08:00:02 | null | UTF-8 | Python | false | false | 1,029 | py | from ..utils import common_functions as c_f
import torch
from .base_ensemble import BaseEnsemble
from .. import architectures
class ConcatenateEmbeddings(BaseEnsemble):
def create_ensemble_model(self, list_of_trunks, list_of_embedders):
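        # if each embedder reports per-branch input sizes, flatten them to totals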
if isinstance(self.embedder_input_sizes[0], list):
self.embedder_input_sizes = [np.sum(x) for x in self.embedder_input_sizes]
normalize_embeddings_func = lambda x: torch.nn.functional.normalize(x, p=2, dim=1)
embedder_operation_before_concat = normalize_embeddings_func if self.normalize_embeddings else None
trunk_operation_before_concat = normalize_embeddings_func if self.use_trunk_output else None
trunk = torch.nn.DataParallel(architectures.misc_models.ListOfModels(list_of_trunks, operation_before_concat=trunk_operation_before_concat))
embedder = torch.nn.DataParallel(architectures.misc_models.ListOfModels(list_of_embedders, self.embedder_input_sizes, embedder_operation_before_concat))
return trunk, embedder | [
"[email protected]"
] | |
3b6bfc3e77f83f7cee554a520237036597617472 | 1faa248589efec362481be76b536e66fb16784b8 | /examples/datavault2-bigdata-example/dags/dvdrentals.py | d631cd5535ef9993bba4e0843ebecdd1e55dcfac | [] | no_license | JomaMinoza/etl-with-airflow | 97769b77dd34420efd7545f8cc0980e5fc67540a | cc4fa33b3fb300c79da7cb3696eed8cebac5ca67 | refs/heads/master | 2020-03-19T18:31:57.234918 | 2018-06-10T09:13:46 | 2018-06-10T09:13:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,102 | py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from datetime import datetime, timedelta
import os
import airflow
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.models import Variable
from acme.operators.pg_to_file_operator import StagePostgresToFileOperator
from acme.operators.file_to_hive_operator import StageFileToHiveOperator
from airflow.operators.hive_operator import HiveOperator
import acme.schema.dvdrentals_schema as schema
args = {
'owner': 'airflow',
'start_date': datetime(2007, 2, 15),
'end_date': datetime(2007, 5, 15),
'provide_context': True,
# We want to maintain chronological order when loading the datavault
'depends_on_past': True
}
dag = airflow.DAG(
'dvdrentals',
schedule_interval="@daily",
dagrun_timeout=timedelta(minutes=60),
template_searchpath='/usr/local/airflow/sql',
default_args=args,
max_active_runs=1)
extract_done = DummyOperator(
task_id='extract_done',
dag=dag)
daily_process_done = DummyOperator(
task_id='daily_process_done',
dag=dag)
staging_done = DummyOperator(
task_id='staging_done',
dag=dag)
loading_done = DummyOperator(
task_id='loading_done',
dag=dag)
def stage_table(pg_table, override_cols=None, dtm_attribute=None):
t1 = StagePostgresToFileOperator(
source='dvdrentals',
pg_table=pg_table,
dtm_attribute=dtm_attribute,
override_cols=override_cols,
postgres_conn_id='dvdrentals',
file_conn_id='filestore',
task_id=pg_table,
dag=dag)
t1 >> extract_done
def create_staging_operator(hive_table):
field_dict = schema.schemas[hive_table]
_, table = hive_table.split('.')
t1 = StageFileToHiveOperator(
hive_table=table + '_{{ts_nodash}}',
relative_file_path='incremental-load/dvdrentals/' + hive_table + '/{{ds[:4]}}/{{ds[5:7]}}/{{ds[8:10]}}/',
field_dict=field_dict,
create=True,
recreate=True,
file_conn_id='filestore',
hive_cli_conn_id='hive_dvdrentals_staging',
task_id='stage_{0}'.format(hive_table),
dag=dag)
daily_process_done >> t1 >> staging_done
return t1
def load_hub(hql, hive_table):
_, table = hive_table.split('.')
t1 = HiveOperator(
hql=hql,
hive_cli_conn_id='hive_datavault_raw',
schema='dv_raw',
task_id='load_{0}'.format(hive_table),
dag=dag)
staging_done >> t1 >> loading_done
return t1
def load_link(hql, hive_table):
_, table = hive_table.split('.')
t1 = HiveOperator(
hql=hql,
hive_cli_conn_id='hive_datavault_raw',
schema='dv_raw',
task_id='load_{0}'.format(hive_table),
dag=dag)
staging_done >> t1 >> loading_done
return t1
def load_sat(hql, hive_table):
_, table = hive_table.split('.')
t1 = HiveOperator(
hql=hql,
hive_cli_conn_id='hive_datavault_raw',
schema='dv_raw',
task_id='load_{0}'.format(hive_table),
dag=dag)
staging_done >> t1 >> loading_done
return t1
stage_table(pg_table='public.actor')
stage_table(pg_table='public.address')
stage_table(pg_table='public.category')
stage_table(pg_table='public.city')
stage_table(pg_table='public.country')
stage_table(pg_table='public.customer')
stage_table(pg_table='public.film')
stage_table(pg_table='public.film_actor')
stage_table(pg_table='public.film_category')
stage_table(pg_table='public.inventory')
stage_table(pg_table='public.language')
stage_table(pg_table='public.payment', dtm_attribute='payment_date')
stage_table(pg_table='public.rental')
stage_table(pg_table='public.staff', override_cols=[
'staff_id', 'first_name', 'last_name', 'address_id', 'email', 'store_id', 'active', 'last_update'])
stage_table(pg_table='public.store')
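# Shell steps between extraction and staging: full daily dumps, then the
# incremental datavault build.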
daily_dumps = BashOperator(
bash_command='/usr/local/airflow/dataflow/process_daily_full_dumps.sh {{ts}}',
task_id='daily_dumps',
dag=dag)
incremental_build = BashOperator(
bash_command='/usr/local/airflow/dataflow/start_incremental_dv.sh {{ts}}',
task_id='incremental_build',
dag=dag)
extract_done >> daily_dumps >> incremental_build >> daily_process_done
create_staging_operator('public.address')
create_staging_operator('public.actor')
create_staging_operator('public.category')
create_staging_operator('public.city')
create_staging_operator('public.country')
create_staging_operator('public.customer')
create_staging_operator('public.film')
create_staging_operator('public.film_actor')
create_staging_operator('public.film_category')
create_staging_operator('public.inventory')
create_staging_operator('public.language')
create_staging_operator('public.payment')
create_staging_operator('public.rental')
create_staging_operator('public.staff')
create_staging_operator('public.store')
load_hub('loading/hub_actor.hql', 'dv_raw.hub_actor')
load_hub('loading/hub_address.hql', 'dv_raw.hub_address')
load_hub('loading/hub_category.hql', 'dv_raw.hub_category')
load_hub('loading/hub_customer.hql', 'dv_raw.hub_customer')
load_hub('loading/hub_film.hql', 'dv_raw.hub_film')
load_hub('loading/hub_language.hql', 'dv_raw.hub_language')
load_hub('loading/hub_staff.hql', 'dv_raw.hub_staff')
load_hub('loading/hub_store.hql', 'dv_raw.hub_store')
load_link('loading/link_customer_address.hql', 'dv_raw.link_customer_address')
load_link('loading/link_film_actor.hql', 'dv_raw.link_film_actor')
load_link('loading/link_film_category.hql', 'dv_raw.link_film_category')
load_link('loading/link_film_language.hql', 'dv_raw.link_film_language')
load_link('loading/link_payment.hql', 'dv_raw.link_payment')
load_link('loading/link_rental.hql', 'dv_raw.link_rental')
load_link('loading/link_staff_address.hql', 'dv_raw.link_staff_address')
load_link('loading/link_staff_store.hql', 'dv_raw.link_staff_store')
load_link('loading/link_store_staff.hql', 'dv_raw.link_store_staff')
load_sat('loading/sat_actor.hql', 'dv_raw.sat_actor')
load_sat('loading/sat_address.hql', 'dv_raw.sat_address')
load_sat('loading/sat_category.hql', 'dv_raw.sat_category')
load_sat('loading/sat_customer.hql', 'dv_raw.sat_customer')
load_sat('loading/sat_film.hql', 'dv_raw.sat_film')
load_sat('loading/sat_language.hql', 'dv_raw.sat_language')
load_sat('loading/sat_payment.hql', 'dv_raw.sat_payment')
load_sat('loading/sat_staff.hql', 'dv_raw.sat_staff')
load_sat('loading/sat_store.hql', 'dv_raw.sat_store')
if __name__ == "__main__":
dag.cli()
| [
"[email protected]"
] | |
794ee8b5e9280e3685ccad77574041cb70b678f9 | f20931826a557f0d884f8b46de259840c29b7428 | /meiduo_mall/meiduo_mall/utils/views.py | 6277b442a78467dc0f3a239d427d6ac13b413536 | [] | no_license | zy723/meiduo_project | 38ccecc2fa1d61f2eb848ebc572dd43d45a534c8 | f50a8105c63554b57419cb3494c3d323bb343f9c | refs/heads/master | 2022-12-15T02:34:42.578549 | 2020-05-20T16:56:27 | 2020-05-20T16:56:27 | 248,264,846 | 0 | 0 | null | 2022-12-12T20:28:41 | 2020-03-18T15:08:40 | TSQL | UTF-8 | Python | false | false | 982 | py | from django import http
from django.contrib.auth.mixins import LoginRequiredMixin
from meiduo_mall.utils.response_code import RETCODE
class LoginRequiredJSONMixin(LoginRequiredMixin):
"""
验证用户并返回json 的扩展类
"""
def handle_no_permission(self):
"""
重写 handle_no_permission 返回json
:return:
"""
        return http.JsonResponse({'code': RETCODE.SESSIONERR, 'errmsg': '用户未登录'})  # 'user is not logged in'
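    # For reference, the upstream Django implementation being overridden: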
"""
def handle_no_permission(self):
if self.raise_exception:
raise PermissionDenied(self.get_permission_denied_message())
return redirect_to_login(self.request.get_full_path(), self.get_login_url(), self.get_redirect_field_name())
class LoginRequiredMixin(AccessMixin):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return self.handle_no_permission()
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
"""
| [
"[email protected]"
] | |
4574c829390dab1c4b2ccbccc3376035ba788e83 | a1b21aa9b4c3b99b9b16fd47686bcc76e6fafd18 | /playground/people.py | b0f3a9910a955ad3632716f3dd4206debc55f55e | [] | no_license | irfan87/python_tutorial | 986c5dae98a5ad928c3820bf0355f544c091caf0 | 71bbf8b8aba2d5a1fafc56b8cb15d471c428a0cf | refs/heads/master | 2020-06-05T00:52:07.619489 | 2019-08-19T02:56:41 | 2019-08-19T02:56:41 | 192,257,432 | 0 | 0 | null | 2019-08-19T02:56:42 | 2019-06-17T01:53:46 | Python | UTF-8 | Python | false | false | 689 | py | # make a list of people dictionary
people = []
person = {
'first_name': 'ahmad irfan',
'last_name': 'mohammad shukri',
'age': 32,
'job': 'web developer'
}
people.append(person)
person = {
'first_name': 'ivy',
'last_name': 'ying',
'age': 21,
'job': 'hair dresser'
}
people.append(person)
person = {
'first_name': 'james',
'last_name': 'edward',
'age': 32,
'job': 'youtube vlogger'
}
people.append(person)
for person in people:
full_name = f"{person['first_name'].title()} {person['last_name'].title()}"
age = f"{person['age']}"
job = f"{person['job'].title()}"
print(f"\nFullname: {full_name}\nAge: {age}\nJob: {job}") | [
"[email protected]"
] | |
92cfb05db74646ccddbc5c70833a6a1303308641 | 2e43fc58f2a70b38c8f74101d639d1ad6fffb609 | /ParadoxTrading/Indicator/General/KDJ.py | bcec73dc9792d1d6b3c2bea58f33a1710705b2a3 | [
"MIT"
] | permissive | ppaanngggg/ParadoxTrading | 9cac27dee26a49739dde661c1e03d83bda09df9b | 2c4024e60b14bf630fd141ccd4c77f197b7c901a | refs/heads/master | 2021-05-11T20:13:14.871616 | 2018-07-13T05:49:15 | 2018-07-13T05:49:15 | 117,434,771 | 96 | 26 | MIT | 2018-03-21T08:47:27 | 2018-01-14T13:57:16 | Python | UTF-8 | Python | false | false | 1,769 | py | import statistics
from collections import deque
from ParadoxTrading.Indicator.IndicatorAbstract import IndicatorAbstract
from ParadoxTrading.Utils import DataStruct
class KDJ(IndicatorAbstract):
def __init__(
self,
_k_period: int = 20,
_d_period: int = 3,
_j_period: int = 3,
_close_key: str = 'closeprice',
_high_key: str = 'highprice',
_low_key: str = 'lowprice',
_idx_key: str = 'time',
_ret_key=('k', 'd', 'j')
):
super().__init__()
self.k_period = _k_period
self.d_period = _d_period
self.j_period = _j_period
self.close_key = _close_key
self.high_key = _high_key
self.low_key = _low_key
self.idx_key = _idx_key
self.keys = [self.idx_key] + list(_ret_key)
self.high_buf = deque(maxlen=self.k_period)
self.low_buf = deque(maxlen=self.k_period)
self.k_buf = deque(maxlen=self.d_period)
self.data = DataStruct(
self.keys, self.idx_key
)
def _addOne(self, _data: DataStruct):
index_value = _data.index()[0]
closeprice = _data[self.close_key][0]
highprice = _data[self.high_key][0]
lowprice = _data[self.low_key][0]
self.high_buf.append(highprice)
self.low_buf.append(lowprice)
high_mean = statistics.mean(self.high_buf)
low_mean = statistics.mean(self.low_buf)
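        # raw %K computed from rolling means of the high/low buffers (note:
        # means rather than the textbook min/max); %D is the SMA of %K, and
        # %J extrapolates K beyond D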
k = 100 * (closeprice - high_mean) / (high_mean - low_mean)
self.k_buf.append(k)
d = statistics.mean(self.k_buf)
j = self.j_period * k - (self.j_period - 1) * d
self.data.addRow(
[index_value, k, d, j],
self.keys
)
| [
"[email protected]"
] | |
9b9fb2306b6d63ca35fd6410f142f97e9fbe80e2 | 3faf4b9fb76145b2326446bc6bc190a5712b3b62 | /Algorithms/0547 Friend Circles.py | 8df58948a81a0e77ac8d3781796868dd3678e6a6 | [] | no_license | cravo123/LeetCode | b93c18f3e4ca01ea55f4fdebceca76ccf664e55e | 4c1288c99f78823c7c3bac0ceedd532e64af1258 | refs/heads/master | 2021-07-12T11:10:26.987657 | 2020-06-02T12:24:29 | 2020-06-02T12:24:29 | 152,670,206 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,332 | py | import collections
# Solution 1, DFS
class Solution:
def dfs(self, idx, seen, M, n):
seen.add(idx)
for i in range(n):
if i not in seen and M[idx][i] == 1:
self.dfs(i, seen, M, n)
def findCircleNum(self, M: List[List[int]]) -> int:
n = len(M)
seen = set()
res = 0
for i in range(n):
if i not in seen:
res += 1
self.dfs(i, seen, M, n)
return res
# Solution 1.1, DFS, building graph first
class Solution:
def dfs(self, idx, seen, d):
seen.add(idx)
for j in d[idx]:
if j not in seen:
self.dfs(j, seen, d)
def findCircleNum(self, M: List[List[int]]) -> int:
n = len(M)
d = collections.defaultdict(set)
for i in range(n):
for j in range(i):
if M[i][j] == 1:
d[i].add(j)
d[j].add(i)
seen = set()
cnt = 0
for i in range(n):
if i not in seen:
self.dfs(i, seen, d)
cnt += 1
return cnt
# Solution 2, BFS
class Solution:
def bfs(self, idx, seen, M, n):
q = [idx]
seen.add(idx)
while q:
tmp = []
for idx in q:
for i in range(n):
if M[idx][i] == 1 and i not in seen:
tmp.append(i)
seen.add(i)
q = tmp
def findCircleNum(self, M: List[List[int]]) -> int:
n = len(M)
res = 0
seen = set()
for i in range(n):
if i not in seen:
res += 1
self.bfs(i, seen, M, n)
return res
# Solution 3, Union Find
# For Union-Find and Trie problems, it is always better to decouple the logic
# and implement Union-Find and Trie as separate classes. You will find it
# much easier and less error-prone to implement!
class UFS:
def __init__(self):
self.size = 0
self.idx = 0
self.d = {} # data -> idx
self.parent = {} # idx -> its parent
def add_point(self, v):
if v not in self.d:
self.d[v] = self.idx
self.parent[self.idx] = self.idx
self.idx += 1
self.size += 1
def dfs(self, i):
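        # find the root of i with path compression: every node on the search
        # path is re-pointed directly at the root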
if i != self.parent[i]:
self.parent[i] = self.dfs(self.parent[i])
return self.parent[i]
def find_parent(self, v):
i = self.d[v]
p_i = self.dfs(i)
return p_i
def union(self, u, v):
p_u, p_v = self.find_parent(u), self.find_parent(v)
if p_u != p_v:
self.parent[p_u] = p_v
self.size -= 1
def get_size(self):
return self.size
class Solution:
def findCircleNum(self, M: List[List[int]]) -> int:
ufs = UFS()
n = len(M)
for i in range(n):
ufs.add_point(i)
for i in range(n):
for j in range(i):
if M[i][j] == 1:
ufs.union(i, j)
return ufs.get_size() | [
"[email protected]"
] | |
c79df475ffbfb4d044fa2c896464eb9892f52c11 | 8c6f7475f372c76bcc9c6538bd1d2b4a50cadf74 | /trackerproject/trackerwebapp/views/students/student_withdrawal.py | 2d69d05367f568f2c774d719ea130132ff09ce68 | [] | no_license | stevebrownlee/python-foundations-tracker | a2169a70a0d29bd38eb416dfefb53110f69576b4 | de1cd4fa6887bed725eabcc2e6842ced882adaf0 | refs/heads/master | 2023-05-25T11:16:55.252655 | 2021-05-31T20:23:48 | 2021-05-31T20:23:48 | 276,436,092 | 1 | 1 | null | 2021-06-09T19:23:10 | 2020-07-01T17:02:37 | Python | UTF-8 | Python | false | false | 509 | py | from datetime import date
from django.shortcuts import render, redirect
from django.urls import reverse
from ...models import Student, Cohort
def student_withdrawal(request):
if request.method == 'POST':
student_id = request.POST.get('student', None)
student = Student.objects.get(pk=student_id)
student.withdrawn = True
student.withdrawn_date = date.today()
student.save()
return redirect(reverse('student_report', kwargs={'student_id': student_id}))
| [
"[email protected]"
] | |
b011141f26bcb341f2544953a40d8dfec7d492c9 | f68eda51246c95597def569224f3b56d4c3700e7 | /top/api/rest/WdtDictLogisticsQueryRequest.py | c22cdf7b0450254198c06a58e33cabca338fbc47 | [
"MIT",
"BSD-3-Clause"
] | permissive | stoensin/taobao-openapi | 47de8fb29ae2d8ce47d4fce07c0ccaeaee1ef91f | 202a9df2085229838541713bd24433a90d07c7fc | refs/heads/main | 2023-07-17T02:17:51.527455 | 2021-08-25T15:08:49 | 2021-08-25T15:08:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | '''
Created by auto_sdk on 2020.06.01
'''
from top.api.base import RestApi
class WdtDictLogisticsQueryRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.page_no = None
self.page_size = None
self.sid = None
def getapiname(self):
return 'hu3cgwt0tc.wdt.dict.logistics.query'
| [
"[email protected]"
] | |
53283ad6f5c2217a5a71cff991b53a10091daa3b | 09d3599c8e53b38104e96d479b2b40ac549d0bef | /Rakesh/permutation-combination/palindromic_permutation.py | 128dfbbc9c13ad0ed8713cdc04433f515971161b | [] | no_license | rakeshsukla53/interview-preparation | 54764c83c86a52c566899ec87d74dad84216764d | 09355094c85496cc42f8cb3241da43e0ece1e45a | refs/heads/master | 2016-09-06T02:08:50.436414 | 2016-02-01T00:31:52 | 2016-02-01T00:31:52 | 40,916,511 | 9 | 3 | null | 2015-12-31T05:00:55 | 2015-08-17T17:59:55 | HTML | UTF-8 | Python | false | false | 1,726 | py |
from collections import Counter
from itertools import permutations, repeat
class Solution(object):
def generatePalindromes(self, s):
"""
Generate all palindrome of a given sequence
:type s: str
:rtype: List[str]
"""
        # a palindromic permutation exists only if at most one character has an odd count
all_combination = []
if len(s) == 1 or len(set(s)) == 1:
return [s]
if len(filter(lambda x: x[1] % 2 == 1, Counter(s).items())) > 1:
return []
else:
if len(s) % 2 == 0:
if len(filter(lambda x: x[1] == 1, Counter(s).items())) == 1:
return []
else:
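                    # even length: permute half of each character's count,
                    # then mirror it to form the palindrome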
result = []
word_frequency = Counter(s)
for letters in word_frequency:
result.extend(repeat(letters, word_frequency[letters] / 2))
for i in permutations("".join(result), len(result)):
all_combination.append("".join(list(i)) + "".join(list(i[::-1])))
return all_combination
else:
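                # odd length: the single odd-count character sits in the
                # middle; mirror the permuted half around it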
result = []
word_frequency = Counter(s)
for letters in word_frequency:
if word_frequency[letters] % 2 == 1:
middle_character = letters
result.extend(repeat(letters, word_frequency[letters] / 2))
for i in permutations("".join(result), len(result)):
all_combination.append("".join(list(i)) + middle_character + "".join(list(i[::-1])))
return all_combination
print Solution().generatePalindromes("aabaa")
| [
"[email protected]"
] | |
07549c00bd4f9d4c98dacd7d1e6de9ae69f986e3 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/container/v1beta1/container-v1beta1-py/google/container_v1beta1/types/__init__.py | 63e366e3fc48ad2bfb6cf302007e3f7562e4d6eb | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,486 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .cluster_service import (
AcceleratorConfig,
AddonsConfig,
AuthenticatorGroupsConfig,
AutoprovisioningNodePoolDefaults,
AutoUpgradeOptions,
BinaryAuthorization,
CancelOperationRequest,
ClientCertificateConfig,
CloudRunConfig,
Cluster,
ClusterAutoscaling,
ClusterTelemetry,
ClusterUpdate,
CompleteIPRotationRequest,
ConfidentialNodes,
ConfigConnectorConfig,
CreateClusterRequest,
CreateNodePoolRequest,
DailyMaintenanceWindow,
DatabaseEncryption,
DefaultSnatStatus,
DeleteClusterRequest,
DeleteNodePoolRequest,
DnsCacheConfig,
EphemeralStorageConfig,
GcePersistentDiskCsiDriverConfig,
GetClusterRequest,
GetJSONWebKeysRequest,
GetJSONWebKeysResponse,
GetNodePoolRequest,
GetOpenIDConfigRequest,
GetOpenIDConfigResponse,
GetOperationRequest,
GetServerConfigRequest,
HorizontalPodAutoscaling,
HttpLoadBalancing,
IntraNodeVisibilityConfig,
IPAllocationPolicy,
IstioConfig,
Jwk,
KalmConfig,
KubernetesDashboard,
LegacyAbac,
LinuxNodeConfig,
ListClustersRequest,
ListClustersResponse,
ListLocationsRequest,
ListLocationsResponse,
ListNodePoolsRequest,
ListNodePoolsResponse,
ListOperationsRequest,
ListOperationsResponse,
ListUsableSubnetworksRequest,
ListUsableSubnetworksResponse,
Location,
MaintenancePolicy,
MaintenanceWindow,
Master,
MasterAuth,
MasterAuthorizedNetworksConfig,
MaxPodsConstraint,
NetworkConfig,
NetworkPolicy,
NetworkPolicyConfig,
NodeConfig,
NodeKubeletConfig,
NodeManagement,
NodePool,
NodePoolAutoscaling,
NodeTaint,
NotificationConfig,
Operation,
OperationProgress,
PodSecurityPolicyConfig,
PrivateClusterConfig,
PrivateClusterMasterGlobalAccessConfig,
RecurringTimeWindow,
ReleaseChannel,
ReservationAffinity,
ResourceLimit,
ResourceUsageExportConfig,
RollbackNodePoolUpgradeRequest,
SandboxConfig,
ServerConfig,
SetAddonsConfigRequest,
SetLabelsRequest,
SetLegacyAbacRequest,
SetLocationsRequest,
SetLoggingServiceRequest,
SetMaintenancePolicyRequest,
SetMasterAuthRequest,
SetMonitoringServiceRequest,
SetNetworkPolicyRequest,
SetNodePoolAutoscalingRequest,
SetNodePoolManagementRequest,
SetNodePoolSizeRequest,
ShieldedInstanceConfig,
ShieldedNodes,
StartIPRotationRequest,
StatusCondition,
TimeWindow,
TpuConfig,
UpdateClusterRequest,
UpdateMasterRequest,
UpdateNodePoolRequest,
UpgradeEvent,
UsableSubnetwork,
UsableSubnetworkSecondaryRange,
VerticalPodAutoscaling,
WorkloadIdentityConfig,
WorkloadMetadataConfig,
DatapathProvider,
UpgradeResourceType,
)
__all__ = (
'AcceleratorConfig',
'AddonsConfig',
'AuthenticatorGroupsConfig',
'AutoprovisioningNodePoolDefaults',
'AutoUpgradeOptions',
'BinaryAuthorization',
'CancelOperationRequest',
'ClientCertificateConfig',
'CloudRunConfig',
'Cluster',
'ClusterAutoscaling',
'ClusterTelemetry',
'ClusterUpdate',
'CompleteIPRotationRequest',
'ConfidentialNodes',
'ConfigConnectorConfig',
'CreateClusterRequest',
'CreateNodePoolRequest',
'DailyMaintenanceWindow',
'DatabaseEncryption',
'DefaultSnatStatus',
'DeleteClusterRequest',
'DeleteNodePoolRequest',
'DnsCacheConfig',
'EphemeralStorageConfig',
'GcePersistentDiskCsiDriverConfig',
'GetClusterRequest',
'GetJSONWebKeysRequest',
'GetJSONWebKeysResponse',
'GetNodePoolRequest',
'GetOpenIDConfigRequest',
'GetOpenIDConfigResponse',
'GetOperationRequest',
'GetServerConfigRequest',
'HorizontalPodAutoscaling',
'HttpLoadBalancing',
'IntraNodeVisibilityConfig',
'IPAllocationPolicy',
'IstioConfig',
'Jwk',
'KalmConfig',
'KubernetesDashboard',
'LegacyAbac',
'LinuxNodeConfig',
'ListClustersRequest',
'ListClustersResponse',
'ListLocationsRequest',
'ListLocationsResponse',
'ListNodePoolsRequest',
'ListNodePoolsResponse',
'ListOperationsRequest',
'ListOperationsResponse',
'ListUsableSubnetworksRequest',
'ListUsableSubnetworksResponse',
'Location',
'MaintenancePolicy',
'MaintenanceWindow',
'Master',
'MasterAuth',
'MasterAuthorizedNetworksConfig',
'MaxPodsConstraint',
'NetworkConfig',
'NetworkPolicy',
'NetworkPolicyConfig',
'NodeConfig',
'NodeKubeletConfig',
'NodeManagement',
'NodePool',
'NodePoolAutoscaling',
'NodeTaint',
'NotificationConfig',
'Operation',
'OperationProgress',
'PodSecurityPolicyConfig',
'PrivateClusterConfig',
'PrivateClusterMasterGlobalAccessConfig',
'RecurringTimeWindow',
'ReleaseChannel',
'ReservationAffinity',
'ResourceLimit',
'ResourceUsageExportConfig',
'RollbackNodePoolUpgradeRequest',
'SandboxConfig',
'ServerConfig',
'SetAddonsConfigRequest',
'SetLabelsRequest',
'SetLegacyAbacRequest',
'SetLocationsRequest',
'SetLoggingServiceRequest',
'SetMaintenancePolicyRequest',
'SetMasterAuthRequest',
'SetMonitoringServiceRequest',
'SetNetworkPolicyRequest',
'SetNodePoolAutoscalingRequest',
'SetNodePoolManagementRequest',
'SetNodePoolSizeRequest',
'ShieldedInstanceConfig',
'ShieldedNodes',
'StartIPRotationRequest',
'StatusCondition',
'TimeWindow',
'TpuConfig',
'UpdateClusterRequest',
'UpdateMasterRequest',
'UpdateNodePoolRequest',
'UpgradeEvent',
'UsableSubnetwork',
'UsableSubnetworkSecondaryRange',
'VerticalPodAutoscaling',
'WorkloadIdentityConfig',
'WorkloadMetadataConfig',
'DatapathProvider',
'UpgradeResourceType',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
b4e791d6d5b79e2d392a46edb0f370b10920f691 | 3e30e334d759a3606be1dce6dca1154568808d68 | /xlsxwriter/test/comparison/test_hyperlink05.py | 639cc3fb1843d81ac62e040af046d02a719c2ee3 | [
"BSD-2-Clause-Views"
] | permissive | cimarronm/XlsxWriter | 59668816c4faf5b2e5fc7b96a4ab98d5797891b7 | b440055bebfcc08339bc3e43cc2ce9819a142004 | refs/heads/master | 2021-01-18T10:07:21.729676 | 2014-08-12T23:17:43 | 2014-08-12T23:17:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'hyperlink05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url('A1', 'http://www.perl.org/')
worksheet.write_url('A3', 'http://www.perl.org/', None, 'Perl home')
worksheet.write_url('A5', 'http://www.perl.org/', None, 'Perl home', 'Tool Tip')
worksheet.write_url('A7', 'http://www.cpan.org/', None, 'CPAN', 'Download')
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
cd99cc88148eee700ffc8af3c5913d772fc4bb78 | f2889a13368b59d8b82f7def1a31a6277b6518b7 | /391.py | 3cab8540bcc577d43e48c5eceebe7ebc5562b23d | [] | no_license | htl1126/leetcode | dacde03de5c9c967e527c4c3b29a4547154e11b3 | c33559dc5e0bf6879bb3462ab65a9446a66d19f6 | refs/heads/master | 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | # ref: https://discuss.leetcode.com/topic/55923/o-n-solution-by-counting-corners
# -with-detailed-explaination
# ref: https://discuss.leetcode.com/topic/56064/python-solution-based-on-hxtang
# -s-idea
import collections
class Solution(object):
def isRectangleCover(self, rectangles):
"""
:type rectangles: List[List[int]]
:rtype: bool
"""
left = min(x[0] for x in rectangles)
right = max(x[2] for x in rectangles)
top = max(x[3]for x in rectangles)
bottom = min(x[1] for x in rectangles)
points = collections.defaultdict(int)
for l, b, r, t in rectangles:
A, B, C, D = (l, b), (r, b), (r, t), (l, t)
for p, q in zip((A, B, C, D), (1, 2, 4, 8)):
if points[p] & q: # avoid rectangles overlapping at a point
return False
points[p] |= q
for px, py in points:
# check all points except the outermost corners
if left < px < right or bottom < py < top:
if points[(px, py)] not in (3, 6, 9, 12, 15):
return False
return True
if __name__ == '__main__':
sol = Solution()
print sol.isRectangleCover([[1, 1, 3, 3], [3, 1, 4, 2], [1, 3, 2, 4],
[3, 2, 4, 4]])
| [
"[email protected]"
] | |
2042d5174b3aa20c74ebf96ade33fe236381d38b | 8d0f432027f1c6ee318f0a065caa9e666d00fb6b | /doit_algorithm/chap01/max3_func.py | 0ee5290250ea8289f09a0369726e4e06b2c21e4d | [] | no_license | HC-kang/algorithm | ca72e1e4edd8a9c68fca32d15739818dcfbb3c8b | 5cd459608bffe28e4de9cfb3ab21880e64456201 | refs/heads/master | 2023-08-14T16:17:19.501251 | 2021-09-20T02:01:59 | 2021-09-20T02:01:59 | 363,345,024 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | def max3(a, b, c):
maximum = a
    if b > maximum: maximum = b
    if c > maximum: maximum = c
return maximum
print(f'max3(3,2,1) = {max3(3,2,1)}')
print(f'max3(3,2,2) = {max3(3,2,2)}')
print(f'max3(3,1,2) = {max3(3,1,2)}')
print(f'max3(3,2,3) = {max3(3,2,3)}')
print(f'max3(2,1,3) = {max3(2,1,3)}')
print(f'max3(3,3,2) = {max3(3,3,2)}')
print(f'max3(3,3,3) = {max3(3,3,3)}')
print(f'max3(2,2,3) = {max3(2,2,3)}')
print(f'max3(2,3,1) = {max3(2,3,1)}') | [
"[email protected]"
] | |
d4d98087653712e9fcd891ab6bb3bbb1c2f31baa | c46515f86db0e36f8eb7276d3aa8c5b9ced6f0a1 | /disintegrating_plug/alternative_approach.py | abc7f6c41a45f9306a3a9afa51c48dc2e048abb6 | [
"MIT"
] | permissive | bolverk/disintegrating_bullet | 63e9e7b78576868840bbb75a4631d103581f295f | 676bd2f575a70497ee0bebee801405f59df7bc9a | refs/heads/master | 2022-01-24T06:36:05.231779 | 2022-01-23T17:11:32 | 2022-01-23T17:11:32 | 109,408,186 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,615 | py | from box import Box
import sympy
from probe import show
from rhd import (psi, p, eta, c, t, r, nu)
from riemann_invariant import (
calc_planar_riemann_invariant,
calc_spherical_riemann_invariant
)
from my_logging import logger
from caching import memory
from equation_of_motion import alpha
def acceleration_transformation():
sympy.var('a t c gamma beta', positive=True)
rf = sympy.Matrix([a*t**2/2,c*t])
boost = gamma*sympy.Matrix([[1,sympy.sqrt(1-1/gamma**2)],
[sympy.sqrt(1-1/gamma**2),1]])
lf = boost*rf
lf.simplify()
_ = (lf[0].diff(t)/lf[1].diff(t)).diff(t)/lf[1].diff(t)
_ = _.series(t,0,1)
_ = _.removeO()
_ = _.simplify()
return _
@memory.cache
def traditional_planar_breakup():
sympy.var('M gamma t A p_i gamma_i alpha w c n n_1 w_1 eta a mu',
positive=True)
def prepare_riemann_invariant():
_ = calc_planar_riemann_invariant()
_ = _.subs(psi, sympy.log(gamma))
_ = _ - _.subs({gamma:gamma_i, p:p_i})
return _
def make_eqns():
return Box(
{'eom':M*gamma*c/t - p*A,
'ri':prepare_riemann_invariant(),
'baryon':A*(n_1*w_1-n*w),
'adiabatic':p/p_i-(n/n_1)**eta}
)
def make_gamma_vs_t():
xi = sympy.Symbol('xi', positive=True)
_ = u.eom
_ = _.subs(M,A*w*p/c**2)
_ = _/p/A
_ = _.simplify()
_ = _.subs(sympy.solve(u.baryon, w, dict=True)[0])
_ = _.subs(sympy.solve(u.adiabatic, n, dict=True)[0])
_ = _.subs(sympy.solve(u.ri, p, dict=True)[0])
_ = _.subs(eta ,xi**2+1)
_ = sympy.expand_power_base(_, force=True)
_ = _.simplify()
_ = sympy.solve(_, gamma)[0]
_ = sympy.expand_power_base(_, force=True)
_ = _.simplify()
_ = _.subs(xi, sympy.sqrt(eta-1))
return _
@memory.cache
def calc_t_breakup():
xi = sympy.Symbol('xi', positive=True)
_ = (t/gamma)**2*(a/w)*(mu*n*c**2)/p
_ = _.subs(sympy.solve(u.baryon, w, dict=True)[0])
_ = _.subs(sympy.solve(u.adiabatic, n, dict=True)[0])
_ = _.subs(sympy.solve(u.ri, p, dict=True)[0])
_ = _.subs(a, c*gamma/t)
_ = _.subs(gamma, u.gamma_vs_t)
_ = _.subs(eta, xi**2+1)
_ = sympy.expand_power_base(_, force=True)
_ = _.simplify()
_ = sympy.solve(_-1, t)[0]
_ = sympy.expand_power_base(_, force=True)
_ = _.simplify()
_ = _.subs(xi, sympy.sqrt(eta-1))
return _
def calc_gamma_breakup():
xi = sympy.Symbol('xi', positive=True)
_ = u.gamma_vs_t.subs(t, u.t_breakup)
_ = _.subs(p_i, mu*gamma_i*n_1*c**2)
_ = _.subs(eta, xi**2+1)
_ = sympy.expand_power_base(_, force=True)
_ = _.simplify()
_ = _.subs(xi, sympy.sqrt(eta-1))
return _
logger.debug('begin alternative_approach')
u = make_eqns()
logger.debug('finished make_eqns')
u['gamma_vs_t'] = make_gamma_vs_t()
logger.debug('finished make_gamma_vs_t')
u['t_breakup'] = calc_t_breakup()
logger.debug('finished calc_t_breakup')
u['gamma_breakup'] = calc_gamma_breakup()
logger.debug('finished calc_gamma_breakup')
return u
def traditional_spherical_breakup():
sympy.var('gamma w n w_1 n_1 p_i M a mu gamma_i t_i',
positive=True)
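    # Spherical phase: same ingredients as the planar case, but with the
    # spherical Riemann invariant and a conical (alpha*t*c)**2 cross-section.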
def make_ri():
xi = sympy.Symbol('xi', positive=True)
_ = calc_spherical_riemann_invariant()
_ = _.subs(psi, sympy.log(gamma))
_ = _.subs(p,xi)
_ = _.subs(r, t*c)
_ = _ - _.subs({gamma:gamma_i,
t:t_i,
xi:p_i})
_ = _.subs(xi, p)
return _
def make_eom():
rhs = M*gamma*c/t
area = (alpha*t*c)**2
lhs = p*area
_ = sympy.log(rhs) - sympy.log(lhs)
_ = sympy.expand_power_base(_, force=True)
_ = _.simplify()
_ = sympy.expand(_)
return _
def make_baryon():
rhs = alpha**2*c**2*t**2*w*n
lhs = rhs.subs({t:t_i,w:w_1,n:n_1})
_ = sympy.log(lhs) - sympy.log(rhs)
_ = sympy.expand(_)
return _
def make_adiabatic():
rhs = sympy.log(p) - eta*sympy.log(n)
lhs = rhs.subs({p:p_i,n:n_1})
_ = sympy.log(rhs) - sympy.log(lhs)
_ = sympy.expand(_)
return _
def make_eqns():
return Box(
{'eom':make_eom(),
'ri':make_ri(),
'baryon':make_baryon(),
'adiabatic':make_adiabatic()})
def make_gamma_vs_t():
xi = sympy.Symbol('xi', positive=True)
_ = u.eom
_ = _.subs(M, alpha**2*t**2*c**2*w*p/c**2)
_ = _.subs(sympy.solve(u.baryon, w, dict=True)[0])
_ = sympy.expand(_)
_ = _.subs(sympy.solve(u.adiabatic, sympy.log(n), dict=True)[0])
_ = sympy.expand(_)
_ = _.subs(sympy.solve(u.ri, sympy.log(p), dict=True)[0])
_ = sympy.expand(_)
_ = _.subs(nu, 2)
_ = sympy.expand(_)
_ = sympy.solve(_, sympy.log(gamma))[0]
_ = sympy.expand(_)
return _
def calc_t_breakup():
xi = sympy.Symbol('xi', positive=True)
_ = (t/gamma)**2*(a/w)*(mu*n*c**2)/p
_ = _.subs(a, c*gamma/t)
_ = _.subs(p, xi)
_ = sympy.log(_)
_ = sympy.expand(_)
_ = _.subs(xi, p)
_ = _.subs(sympy.solve(u.baryon, w, dict=True)[0])
_ = sympy.expand(_)
_ = _.subs(sympy.solve(u.adiabatic, sympy.log(n), dict=True)[0])
_ = sympy.expand(_)
_ = _.subs(sympy.solve(u.ri, sympy.log(p), dict=True)[0])
_ = _.subs(nu, 2)
_ = sympy.expand(_)
_ = _.subs(sympy.log(gamma), u.gamma_vs_t)
_ = sympy.expand(_)
_ = _.subs(eta, sympy.Rational(4,3))
_ = _.n()
_ = sympy.solve(_, sympy.log(t))[0]
#_ = -_.subs(sympy.log(t),0)/_.subs(sympy.log(t),xi).diff(xi)
_ = sympy.expand(_)
return _
def calc_gamma_breakup():
_ = u.gamma_vs_t
_ = sympy.expand(_)
_ = _.subs(sympy.log(t), u.t_breakup)
_ = _.subs(p_i, n_1*mu*c**2*gamma_i)
_ = _.subs(eta, sympy.Rational(4,3))
_ = _.subs(w_1, t_i*c/gamma_i)
_ = _.n()
_ = sympy.expand(_)
_ = _.simplify()
return _
logger.debug('begin spherical breakup calculation')
u = make_eqns()
logger.debug('finished make eqns')
u['gamma_vs_t'] = make_gamma_vs_t()
logger.debug('finished gamma_vs_t')
u['t_breakup'] = calc_t_breakup()
logger.debug('finished t breakup')
u['gamma_breakup'] = calc_gamma_breakup()
logger.debug('finished gamma breakup')
return [u.gamma_breakup,
u.t_breakup.subs({p_i:gamma_i*mu*n_1*c**2,
w_1:t_i*c/gamma_i}).simplify()]
def calc_planar_breakup():
sympy.var('p_i gamma gamma_i p_t gamma_t rho_t rho_i xi',
positive=True)
initial = {p:p_i,
gamma:gamma_i}
final = {p:p_t,
gamma:gamma_t}
ri = calc_planar_riemann_invariant().subs(psi, sympy.log(gamma))
eqn1 = ri.subs(initial) - ri.subs(final)
eqn2 = sympy.log(p_t) - sympy.log(p_i) - eta*(sympy.log(rho_t)-sympy.log(rho_i))
_ = [eqn1, eqn2]
_ = sympy.Matrix(_)
_ = _.subs(p_i, gamma_i*rho_i)
_ = _.subs(rho_t, p_t)
_ = _.subs(eta, sympy.Rational(4,3))
_ = sympy.solve(_,[gamma_t, p_t])[0]
sol = _
return sol
if __name__ == '__main__':
show(locals())
| [
"[email protected]"
] | |
5b6c04aa591db9d8cb2167890a99aa0967e2659b | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/counting_20200622230814.py | 361e416c5aa16e50da9614bce0b62e3f72c61ca1 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | def counting(str):
str = str.split('-')
hour1 = int(convertTo24(str[0]).split(':')[0])
print('hour1',hour1)
hour2 = int(convertTo24(str[1]).split(':')[0])
print('hour2',hour2)
minutes1 = int(convertTo24(str[0]).split(':')[1])
print('min1',minutes1)
minutes2 = int(convertTo24(str[1]).split(':')[1])
print('min2',minutes2)
def convertTo24(hour):
newHour = ''
if 'am' in hour and hour[:2] == '12':
newHour = '24'
newHour += hour[2:5]
elif 'pm' in hour and hour[:2] == '12':
newHour = hour[:2]
newHour += hour[2:5]
elif 'pm' in hour:
print(hour[:1])
newHour = str(int(hour[:1]) +12)
newHour += hour[1:4]
elif 'am' in hour:
newHour = hour[:4]
else:
newHour = hour[:5]
return newHour
counting("11:00pm -12:00am") | [
"[email protected]"
] |