The sample records below follow this schema, one field per line in this column order (dtype, then observed min/max or number of classes):

column                 dtype               min / classes          max
blob_id                stringlengths       40                     40
directory_id           stringlengths       40                     40
path                   stringlengths       3                      288
content_id             stringlengths       40                     40
detected_licenses      listlengths         0                      112
license_type           stringclasses       2 values
repo_name              stringlengths       5                      115
snapshot_id            stringlengths       40                     40
revision_id            stringlengths       40                     40
branch_name            stringclasses       684 values
visit_date             timestamp[us]date   2015-08-06 10:31:46    2023-09-06 10:44:38
revision_date          timestamp[us]date   1970-01-01 02:38:32    2037-05-03 13:00:00
committer_date         timestamp[us]date   1970-01-01 02:38:32    2023-09-06 01:08:06
github_id              int64               4.92k                  681M
star_events_count      int64               0                      209k
fork_events_count      int64               0                      110k
gha_license_id         stringclasses       22 values
gha_event_created_at   timestamp[us]date   2012-06-04 01:52:49    2023-09-14 21:59:50
gha_created_at         timestamp[us]date   2008-05-22 07:58:19    2023-08-21 12:35:19
gha_language           stringclasses       147 values
src_encoding           stringclasses       25 values
language               stringclasses       1 value
is_vendor              bool                2 classes
is_generated           bool                2 classes
length_bytes           int64               128                    12.7k
extension              stringclasses       142 values
content                stringlengths       128                    8.19k
authors                listlengths         1                      1
author_id              stringlengths       1                      132
6bc8e6bda70fb29b075f4f3c8c40b9a6b2656fcf
9c6e63eb1796bbf4c37d93fca941fb67b4cd4741
/trunk/scarlett/app.py
7f7179015d2a9cefbdbe4557f2fd080029521298
[]
no_license
BGCX261/zizw-svn-to-git
ffc6636d8e0d91b24f124ba3d16c61af10d7441c
c8d068af7a36396ce707dc035b15330c77b02f2a
refs/heads/master
2016-09-05T13:11:22.053860
2015-08-25T15:51:45
2015-08-25T15:51:45
41,585,036
0
0
null
null
null
null
UTF-8
Python
false
false
4,466
py
import logging
import webob
import wsgiref.handlers
import simplejson.encoder
import simplejson.decoder

from google.appengine.ext import db
from google.appengine.api import users

from scarlett import model
from scarlett import utils

jsonEncoder = simplejson.encoder.JSONEncoder()
jsonDecoder = simplejson.decoder.JSONDecoder()


def scarlett(environ, start_response):
    #
    # create request & response objects
    #
    request = webob.Request(environ)
    response = webob.Response()
    #
    # create session object
    #
    session = Session(request)
    # do job
    channel = session.message["channel"]
    if channel == "refresh":
        if session.isAdmin:
            response.body = shell % ("Scarlett-Admin", "scarlett.Admin")
        elif session.user:
            response.body = shell % ("Scarlett", "scarlett.Main")
        else:
            response.body = shell % ("Login", "scarlett.Login")
    elif channel == "locateservice":
        fullName = str(session.message["fullName"])
        service = utils.my_import(fullName)
        simpleName = fullName.split('.')[-1]
        response.body = generateServiceStub(service, fullName, simpleName)
        response.content_type = "text/plain"
        response.charset = "UTF-8"
    elif channel == "rmi":
        fullName = str(session.message["serviceName"])
        methodName = str(session.message["methodName"])
        args = session.message["args"]
        argList = ""
        for i in range(len(args)):
            argList += "args[%s], " % i
        argList = argList[:-2]
        service = utils.my_import(fullName)
        outMessage = {
            "result": eval("service." + methodName + "(session, " + argList + ")")
        }
        if fullName == "scarlett.admin" and methodName == "login" and outMessage["result"]:
            response.set_cookie("sid", userToSid(args[0]))
        response.body = jsonEncoder.encode(outMessage)
        response.content_type = "text/plain"
        response.charset = "UTF-8"
    elif channel == "admin":
        user = users.get_current_user()
        if not user:
            response.body = users.create_login_url("/")
            logging.info("admin: do login")
        else:
            response.body = "/"
            logging.info("admin: do normal")
    else:
        response.body = "unknown channel: %s" % str(channel)
    #
    return response(environ, start_response)


#
# Tips:
# session.message
# session.message.channel
# session.isAdmin
# session.user
# session.user.alias
#
class Session():

    def __init__(self, request):
        #
        # setting message
        #
        if request.method == "GET":
            self.message = {"channel": "refresh"}
        else:
            self.message = jsonDecoder.decode(request.body)
        #
        # setting isAdmin & user
        #
        if users.is_current_user_admin():
            self.isAdmin = True
            self.user = None
        elif "sid" not in request.cookies:
            self.isAdmin = False
            self.user = None
        elif not request.cookies["sid"]:
            self.isAdmin = False
            self.user = None
        else:
            self.isAdmin = False
            self.user = sidToUser(request.cookies["sid"])


def sidToUser(sid):
    #
    # TODO: a real sid should be used
    #
    return model.User.get(db.Key.from_path("User", "ID_" + sid, _app="scarlett"))


def userToSid(userName):
    #
    # TODO: a real sid should be used
    #
    return userName


def generateServiceStub(service, fullName, simpleName):
    methodList = filter(lambda x: x[0:1] != "_", dir(service))
    stub = "var " + simpleName + " = function(){\n"
    stub += "}\n\n"
    for method in methodList:
        stub += simpleName + ".prototype." + method + " = function() {\n"
        stub += "\treturn jsloader.doRmi('%s', '%s', arguments);\n" % (fullName, method)
        stub += "};\n"
    return stub


def main():
    wsgiref.handlers.CGIHandler().run(scarlett)


shell = """
<html>
<head>
<title>%s</title>
<script>
var App = null;
var app = null;
function init() {
    App = jsloader.resolve("%s")
    app = new App(document.body);
    var welcome = document.getElementById("welcome");
    document.body.removeChild(welcome);
}
function destroy() {
    app.destroy();
}
</script>
</head>
<body scroll="no" style="overflow: hidden; margin: 0px; padding: 0px" onload="init()" onunload="destroy()">
<span id="welcome">Loading ...</span>
</body>
<script src="js/lang/JSLoader.js"></script>
</html>
"""

if __name__ == "__main__":
    main()
388430234a19c8d3bb7df514027066b68cf8fc68
3507fdc5012e55f6a784d70a7ad6da11224e5bfe
/caesar_cipher.py
2fa71c8cece652f7e97971c772561d702a65ad0c
[]
no_license
jonasthiel/100-days-of-code-python
640be865bdba10cca17ba72c4923cf9961ed570c
94ad366d10ed862c6c699ae1f242bd462f2ba597
refs/heads/main
2023-04-03T11:53:16.993098
2021-04-09T14:20:41
2021-04-09T14:20:41
330,404,825
0
0
null
null
null
null
UTF-8
Python
false
false
1,924
py
from os import system

alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']

logo = """
 ,adPPYba, ,adPPYYba,  ,adPPYba, ,adPPYba, ,adPPYYba, 8b,dPPYba,
a8"     "" ""     `Y8 a8P_____88 I8[    "" ""     `Y8 88P'   "Y8
8b         ,adPPPPP88 8PP""""""" `"Y8ba,   ,adPPPPP88 88
"8a,   ,aa 88,    ,88 "8b,   ,aa aa    ]8I 88,    ,88 88
 `"Ybbd8"' `"8bbdP"Y8  `"Ybbd8"' `"YbbdP"' `"8bbdP"Y8 88

           88          88
           ""          88
 ,adPPYba, 88 8b,dPPYba, 88,dPPYba,  ,adPPYba, 8b,dPPYba,
a8"     "" 88 88P'   "8a 88P'    "8a a8P_____88 88P'   "Y8
8b         88 88     d8  88      88 8PP""""""" 88
"8a,   ,aa 88 88b,  ,a8" 88      88 "8b,   ,aa 88
 `"Ybbd8"' 88 88`YbbdP"'  88      88  `"Ybbd8"' 88
              88
              88
"""

end = False
while not end:
    print(logo)
    direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
    text = input("Type your message:\n").lower()
    shift = int(input("Type the shift number:\n"))

    def caesar(direction, text, shift):
        output_text = ""
        if shift > 26:
            shift %= 26
        if direction == "encode":
            for i in text:
                if i in alphabet:
                    index = alphabet.index(i)
                    if index + shift > 25:
                        output_text += alphabet[index + shift - 26]
                    else:
                        output_text += alphabet[index + shift]
                else:
                    output_text += i
        elif direction == "decode":
            for i in text:
                if i in alphabet:
                    index = alphabet.index(i)
                    if index - shift < 0:
                        output_text += alphabet[index - shift + 26]
                    else:
                        output_text += alphabet[index - shift]
                else:
                    output_text += i
        print(f"The {direction}d text is {output_text}")

    caesar(direction, text, shift)

    if input("Type 'yes' if you want to go again. Otherwise type 'no'.\n").lower() == "no":
        end = True
    else:
        system('clear')
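A non-interactive check of the wrap-around arithmetic used above; the original program is interactive, and this editorial sketch just exercises the same index math in its equivalent modulo form:

# Equivalent modulo form of the encode branch's wrap-around index logic.
alphabet = [chr(c) for c in range(ord('a'), ord('z') + 1)]

def shift_char(ch, shift):
    index = alphabet.index(ch)
    return alphabet[(index + shift) % 26]

assert ''.join(shift_char(c, 5) for c in "xyz") == "cde"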
ceb0c0134cb3480fdab988077750fcef69ee298d
f8ea3582884df87172cb747e424ebd0c20223614
/(sandbox,tobemerged)/setup.py
bfdf53b7357b1a52aaad77a7986bc61cc1b5ddd9
[ "MIT" ]
permissive
karimbahgat/PythonGis
94f52f800a769ee54b12c7277604ead011465321
fb99148a15bcbe0438ddca67b484a15076bd961a
refs/heads/master
2023-04-12T15:59:08.522464
2022-09-09T22:48:32
2022-09-09T22:48:32
47,153,255
5
1
null
null
null
null
UTF-8
Python
false
false
1,260
py
############
### allow building the exe by simply running this script
import sys
sys.argv.append("py2exe")

############
### imports
from distutils.core import setup
import py2exe

###########
### options
WINDOWS = [{"script": "guitester.py",
            "icon_resources": [(1, "pythongis/app/logo.ico")]
            }]
OPTIONS = {"skip_archive": True,
           "dll_excludes": ["python26.dll", "python27.so"]}

###########
### create the application icon
##import PIL, PIL.Image
##img = PIL.Image.open("icon.png")
##img.save("icon.ico", sizes=[(255,255),(128,128),(64,64),(48,48),(32,32),(16,16),(8,8)])

###########
### build
setup(windows=WINDOWS,
      options={"py2exe": OPTIONS}
      )

###########
### manually copy pythongis package to dist
### ...because py2exe may not copy all files
import os
import shutil
frompath = "pythongis"
topath = os.path.join("dist", "pythongis")
shutil.rmtree(topath)  # deletes the folder copied by py2exe
shutil.copytree(frompath, topath)

###########
### and same with dependencies
for dependname in os.listdir("dependencies"):
    frompath = os.path.join("dependencies", dependname)
    topath = os.path.join("dist", dependname)
    shutil.rmtree(topath)  # deletes the folder copied by py2exe
    shutil.copytree(frompath, topath)
d519581682c5b4acb68ab1878e3cda3a7b8c4ddd
5e2655fb23e558c54695dea5c9456b5552570947
/localdev/seed/management/commands/seed_data.py
f42ad2be00ea5d9f4f5111900de0d82b66bf4e16
[ "BSD-3-Clause" ]
permissive
mitodl/bootcamp-ecommerce
992cb23243462d82c75cfae6c115a27728491219
339c67b84b661a37ffe32580da72383d95666c5c
refs/heads/master
2023-08-31T10:45:57.827990
2023-07-25T13:55:32
2023-07-25T13:55:32
82,849,185
6
3
BSD-3-Clause
2023-08-24T20:25:47
2017-02-22T20:27:24
Python
UTF-8
Python
false
false
709
py
"""Management command to create or update seed data""" from django.core.management.base import BaseCommand from localdev.seed.api import create_seed_data from localdev.seed.utils import get_raw_seed_data_from_file class Command(BaseCommand): """Creates or updates seed data based on a raw seed data file""" help = __doc__ def handle(self, *args, **options): raw_seed_data = get_raw_seed_data_from_file() results = create_seed_data(raw_seed_data) if not results.has_results: self.stdout.write(self.style.WARNING("No results logged.")) else: self.stdout.write(self.style.SUCCESS("RESULTS")) self.stdout.write(results.report)
c7040497fddc70804c791aa8caffd6ee49621d0d
98c6ea9c884152e8340605a706efefbea6170be5
/examples/data/Assignment_2/hbbirf001/question3.py
86f1543deb5d0d08303893c1de5d53fe0d63e38e
[]
no_license
MrHamdulay/csc3-capstone
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
refs/heads/master
2021-03-12T21:55:57.781339
2014-09-22T02:22:22
2014-09-22T02:22:22
22,372,174
0
0
null
null
null
null
UTF-8
Python
false
false
260
py
import math

pi = 2
denom = math.sqrt(2)
while denom != 2:
    pi = pi * 2 / denom
    denom = math.sqrt(2 + denom)

print('Approximation of pi:', round(pi, 3), sep=' ')

radius = eval(input('Enter the radius:\n'))
area = pi * radius ** 2
print('Area:', round(area, 3))
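The loop above implements Viète's infinite product for pi: each factor is 2 divided by a nested radical of 2s, and the denominators converge to 2. A minimal non-interactive cross-check of that recurrence, not part of the original file:

# Viete's product: pi = 2 * (2/sqrt(2)) * (2/sqrt(2+sqrt(2))) * ...
import math

approx = 2.0
denom = math.sqrt(2)
for _ in range(30):  # ~30 factors already exhaust double precision
    approx = approx * 2 / denom
    denom = math.sqrt(2 + denom)
print(abs(approx - math.pi))  # prints a value near 0.0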
907107ef98f88293e5eab6076021cbe6900e6c7d
44acca58155b0a5a2b46d6a9ed255befece4f5d1
/api_vendas/api_vendas/wsgi.py
298a0f3193ddd7ce468b07db9e5f06b15df79e98
[]
no_license
GeovaneCavalcante/appHubVendas
6f6c74cb2f94b2534ab1c3d0f241422fb88b81f4
068bb08e2a270d132e60502c35edc11a4526f671
refs/heads/master
2020-03-20T07:22:32.555287
2018-06-13T22:38:53
2018-06-13T22:38:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
398
py
""" WSGI config for api_vendas project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api_vendas.settings") application = get_wsgi_application()
65fc92a79bd813ef453b821d8a02b1a20e6cd577
e588da296dd6ec3bedee9d24444dfca6e8780aef
/classroom examples/10.py
ab241aa1417f606aba6c9459a043d03a16b9e3e0
[]
no_license
sujith1919/TCS-Python
98eac61a02500a0e8f3139e431c98a509828c867
c988cf078616540fe7f56e3ebdfd964aebd14519
refs/heads/master
2023-03-02T09:03:10.052633
2021-02-02T16:40:18
2021-02-02T16:40:18
335,355,862
0
0
null
null
null
null
UTF-8
Python
false
false
238
py
import time
import os

starttime = time.time()
for x in range(1000):
    x ** x
endtime = time.time()
print(endtime - starttime)

time.sleep(1)  # sleeps for 1 second

ts = os.path.getctime("10.py")
print(ts)
print(time.ctime(ts))
22256ba682801c86d92e53c516104a2ac18db1fd
b27b26462524984951bfbab9250abd145ecfd4c8
/Demoing/stage_two/bloomingtonnormal/craigslist_sample/craigslist_sample/spiders/craigslist_spider.py
9ccd525099e5b2802a2344337a1293d1d28242f0
[]
no_license
afcarl/fastTraffickingGrab
cb813d066f1f69f359598e0b55e632dafd273c89
9ff274cb7c9b6c7b60d1436c209b2bfc5907267d
refs/heads/master
2020-03-26T06:21:21.404931
2014-08-16T12:38:29
2014-08-16T12:38:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,042
py
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from craigslist_sample.items import CraigslistSampleItem


class CraigslistSpider(CrawlSpider):
    name = "craigslist"
    allowed_domains = ["craigslist.org"]
    # The original file listed every page literally: index100.html through
    # index9900.html in steps of 100; the comprehension expands to the same URLs.
    start_urls = ["http://bn.craigslist.org",
                  "http://bn.craigslist.org/cas/"] + \
                 ["http://bn.craigslist.org/cas/index%d.html" % n
                  for n in range(100, 10000, 100)]

    rules = (Rule(SgmlLinkExtractor(allow=(), restrict_xpaths=('//a')),
                  callback="parse", follow=True),)

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        titles = hxs.select("//span[@class='pl']")
        date_info = hxs.select("//h4[@class='ban']/span[@class='bantext']/text()")
        items = []
        file_to = open("things.txt", "a")
        file_to.write(response.body)
        for titles in titles:
            item = CraigslistSampleItem()
            item["title"] = titles.select("a/text()").extract()
            item["link"] = titles.select("a/@href").extract()
            item["date"] = date_info.extract()
            items.append(item)
        return items
07b8a5019433683f2a6f9216935aaa0a5caa2f35
f0b75bd94f133a13f469f429a696f26be3be9862
/week 2/.history/python_second_assignment_20200204163718.py
b9cd1fdfdd8aa3efdde2ac692d9c4aefc42371f3
[]
no_license
dechavez4/Python_handin_assignments
023350fabd212cdf2a4ee9cd301306dc5fd6bea0
82fd8c991e560c18ecb2152ea5a8fc35dfc3c608
refs/heads/master
2023-01-11T23:31:27.220757
2020-05-22T10:33:56
2020-05-22T10:33:56
237,179,899
0
0
null
2022-12-30T20:14:04
2020-01-30T09:30:16
Python
UTF-8
Python
false
false
2,196
py
import csv
from sys import argv
import platform
import argparse
import os.path
from os import path

# Create a python file with 3 functions:

# A. def print_file_content(file) that can print content of a csv file to the console
def print_file_content(file):
    with open(file) as csv_file:
        content = csv_file.readlines()
        for line in content[:20]:
            print(line.strip().split(','))

# can overwrite the old file.
# B. def write_list_to_file(output_file, lst) that can take a list of tuple and write each element to a new line in file
def write_list_to_file(output_file, *lst):
    if platform.system() == 'Windows':
        newline = ''
    else:
        newline = None
    with open(output_file, 'w', newline=newline) as output_file:
        output_writer = csv.writer(output_file)
        for ele in lst:
            output_writer.writerow(ele)

# C. def read_csv(input_file) that take a csv file and read each row into a list
def read_line(file):
    with open(file) as file_object:
        lines = file_object.readlines()
        print(lines)
        for line in lines:
            print(line.rstrip())

# 2. Add a functionality so that the file can be called from cli with 2 arguments
def run():
    if args.print:
        print_file_content(argv[2])
    if args.write:
        write_list_to_file(argv[2], argv[3:])
    if args.read:
        read_line(argv[2])
    if args.file:
        # write only if the target exists; otherwise report the missing file
        # (the original called path.exists() without using its result)
        if path.exists(argv[2]):
            write_list_to_file(argv[2], argv[3:])
        else:
            print("file doesnt exist", argv[2])

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="this is my menu")
    parser.add_argument("--print", help='function that can print content of a csv file to the console')
    parser.add_argument("--write", nargs="*", help='function that can take a list of tuple and write each element to a new line in file')
    parser.add_argument("--read", help='function that take a csv file and read each row into a list')
    # The flag name was garbled in the source ("-- "); "--file" matches the
    # `args.file` lookup in run() above.
    parser.add_argument("--file", nargs="*", help="an argument that if given will write the content to file_name or otherwise will print it to the console.")
    args = parser.parse_args()
    run()
8f0ea1ddcb842afbdfefab10bdc1a50be19625f3
a140b45f9f16b74353d15ed573ea765b3fef046d
/algorithms/leet.0693.src.1.py
04b92c007caace7e60b187ff08050dfd9eefba49
[]
no_license
fish-ball/leetcode
258d4b37f05560d914bcd29f7c54820deeadb33f
3dfd8f73c65d43cc2766c20700a619141acb927b
refs/heads/master
2023-05-28T18:32:43.638675
2023-05-20T04:25:23
2023-05-20T04:25:23
31,968,994
1
0
null
null
null
null
UTF-8
Python
false
false
207
py
class Solution:
    def hasAlternatingBits(self, n: int) -> bool:
        if n <= 2:
            return True
        if n & 3 in (3, 0):
            return False
        return self.hasAlternatingBits(n >> 1)
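A quick sanity check for the solution above, not part of the original file: `n & 3` isolates the two lowest bits, and seeing `0b11` or `0b00` there means two equal adjacent bits, so the pattern cannot alternate; otherwise the check shifts right and repeats.

s = Solution()
assert s.hasAlternatingBits(5) is True    # 0b101
assert s.hasAlternatingBits(7) is False   # 0b111
assert s.hasAlternatingBits(10) is True   # 0b1010
assert s.hasAlternatingBits(12) is False  # 0b1100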
5fa3c9d9bb0d62ebb1c3fba841f5fde8baeb38ba
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/tDswMNY7X9h7tyTS4_22.py
cf345fc278bf3cb0fa4a9810e75fe0ead3c22a1a
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
1,116
py
""" **Mubashir** was reading about [Pascal's triangle](https://en.wikipedia.org/wiki/Pascal's_triangle) on Wikipedia. In mathematics, Pascal's triangle is a triangular array of the binomial coefficients that arises in probability theory, combinatorics, and algebra. ![Mubashir](https://edabit- challenges.s3.amazonaws.com/PascalTriangleAnimated2.gif) Formula for Pascal's triangle is given by: ![Mubashir](https://edabit-challenges.s3.amazonaws.com/jbderjvbv.png) where `n` denotes a row of the triangle, and `k` is the position of a term in the row. Create a function which takes a number `n` and returns **n top rows** of Pascal's Triangle flattened into a one-dimensional list. ### Examples pascals_triangle(1) ➞ [1] pascals_triangle(2) ➞ [1, 1, 1] pascals_triangle(4) ➞ [1, 1, 1, 1, 2, 1, 1, 3, 3, 1] ### Notes N/A """ import math def pascals_triangle(n): triangle = [] for row in range(n): new_row = [] for k in range(row+1): new_row.append(math.factorial(row)//(math.factorial(k)*math.factorial(row-k))) triangle += new_row return triangle
dc0795e8588404f2f441e385ff7792de19d21846
f0e0c1637f3b49fd914410361c3f1f3948462659
/Python/Math/integers_come_in_all_sizes.py
067bf933bb0a96f4a2758091ba2df74899b1be13
[]
no_license
georggoetz/hackerrank-py
399bcd0599f3c96d456725471708068f6c0fc4b1
a8478670fcc65ca034df8017083269cb37ebf8b0
refs/heads/master
2021-09-18T07:47:32.224981
2018-07-11T09:24:49
2018-07-11T09:24:49
111,611,930
0
0
null
null
null
null
UTF-8
Python
false
false
369
py
# http://www.hackerrank.com/contests/python-tutorial/challenges/python-integers-come-in-all-sizes

def solve(a, b, c, d):
    """
    >>> solve(9, 29, 7, 27)
    4710194409608608369201743232
    """
    print(a ** b + c ** d)

if __name__ == "__main__":
    a = int(input())
    b = int(input())
    c = int(input())
    d = int(input())
    print(solve(a, b, c, d))
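The exercise above needs no big-integer library because Python ints have arbitrary precision; a one-liner reproduces the doctest value:

print(9 ** 29 + 7 ** 27)  # 4710194409608608369201743232, as in the doctest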
e70f14eb83da74ee83dd9e8854f5f79da094837c
fb783dda8d0ca34ad95d0c3f7ebbb6794a4b0467
/ball.py
a2d479dde631ec996cf01de0feb2431d739b6875
[]
no_license
Loai17/Y--Project
967ec843ccc033fcdfdb59bd676adcfbea397446
155e9820bfa42c13e3dc7a82976146b1b86505ce
refs/heads/master
2020-04-12T16:29:00.322136
2018-12-27T10:34:39
2018-12-27T10:34:39
162,613,892
0
0
null
null
null
null
UTF-8
Python
false
false
1,014
py
from turtle import *


class Ball(Turtle):

    def __init__(self, x, y, dx, dy, r, color):
        Turtle.__init__(self)
        self.dx = dx
        self.dy = dy
        self.r = r
        self.pu()
        self.goto(x, y)
        self.shape("circle")
        self.shapesize(r * r)
        self.color(color)
        print(self.xcor())
        print(self.ycor())

    def move(self, screen_width, screen_height):
        current_x = self.xcor()
        new_x = current_x + self.dx
        current_y = self.ycor()
        new_y = current_y + self.dy
        right_side_ball = new_x + self.r
        left_side_ball = new_x - self.r
        top_side_ball = new_y + self.r
        bottom_side_ball = new_y - self.r
        self.goto(new_x, new_y)
        if (current_x >= screen_width / 2):
            self.dx = -self.dx
        elif (current_x <= (-screen_width / 2)):
            self.dx = -self.dx
        if (current_y >= screen_height / 2):
            self.dy = -self.dy
        elif (current_y <= (-screen_height / 2)):
            self.dy = -self.dy
aa2a8c8f570a1c0f44928db8d59780469b207993
4f97122844fb8cbaccf9ed9fa300a27a290d1a37
/1/111.py
0a46d7eb3eec2fe044cfcd027f9ffbf0dbd17e63
[]
no_license
cq146637/Advanced
52d97ab0f8e7ec85e6d81692e92bad967af066e6
18380e5c51124ef1e6d243ae216280b49edc7001
refs/heads/master
2020-03-22T03:05:02.960444
2018-07-02T08:30:27
2018-07-02T08:30:27
139,151,714
0
0
null
null
null
null
UTF-8
Python
false
false
834
py
__author__ = 'Cq'

from collections import deque
import pickle
from random import randint
import os

result = randint(1, 100)
print("result is ", result)
deque1 = deque([], 5)
if os.path.isfile("save.data"):
    # pickle data must be read in binary mode; the original opened the
    # file in default text mode, which fails under Python 3
    deque1 = pickle.load(open("save.data", 'rb'))
while True:
    k = input("\nplease input your guess number: ")
    if k.isdigit():
        k = int(k)
    elif k == 'h' or k == 'H':
        print("your input history is ", list(deque1))
        continue  # added: otherwise the string 'h' falls through to the int comparisons below
    else:
        continue
    if k != result:
        if k > result:
            print("your number is greater than result\n")
        else:
            print("your number is less than result\n")
        deque1.append(k)
    else:
        print("It was good result...")
        deque1.append(k)
        break
    if k == 100:
        break
f = open("save.data", 'wb')  # binary mode for pickle.dump (original used 'w')
pickle.dump(deque1, f)
4148ba0011b8da0c23ac14048f68d96a7d5a144f
ed7f2c5c235d1a3beca2ad78f8ef6eecd9afeea6
/src/motors.py
d32ecdccd49447815025fb0116e63b984bb1da0e
[]
no_license
mvwicky/roboSim
8f48bdfa291cfe6abc1c6a7294c7ab59161e3304
c4d5d5f641ed976c71a591085019fcedc2ec3a5a
refs/heads/master
2016-09-10T10:40:11.235120
2014-08-19T18:51:06
2014-08-19T18:51:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,741
py
import os
import sys
import random
import math
import utilFunctions as utlF


class motor(object):
    """Generic motor object"""

    def __init__(self, port, wheelRad=0, ticks=1000, tolerance=0, sprite=None):
        """port: which port the motor is in
        wheelRad: the radius of the attached wheel
            if not a drive motor: wheelRad=0
        ticks: number of ticks per revolution
        tolerance:
        sprite: path to the sprite
        """
        self.port = port
        self.positon = 0
        self.wheelRad = wheelRad
        self.ticks = ticks
        self.lastSpeed = 0
        self.currentSpeed = 0
        # self.distPerTick
        self.context = None
        if sprite == None:
            pass
        elif sprite != None and type(sprite) != utlF.sprite:
            print("Invalid sprite")
        elif sprite != None and type(sprite) == utlF.sprite:
            self.sprite = sprite
        self.tolerance = tolerance

    def update(self):
        if self.context == None:
            print("Context not defined")
            return -1
        else:
            pass

    def draw(self):
        pass

    def moveAtVelocity(self, velocity):
        self.currentSpeed = velocity
        return 0

    def moveRelativePosition(self, velocity, delta):
        pass

    def moveToPosition(self, velocity, position):
        pass

    def moveAngleDeg(self, velocity, theta):
        pass

    def moveAngleRad(self, velocity, theta):
        pass

    def getPosition(self):
        pass

    def forward(self):
        pass

    def off(self):
        pass

    def zeroMotor(self):
        """Sets the motor position back to zero"""
        pass

    def mav(self, velocity):
        return self.moveAtVelocity(velocity)

    def mrp(self, velocity, position):
        return self.moveRelativePosition(velocity, position)

    def mtp(self, velocity, position):
        return self.moveToPosition(velocity, position)

    def mad(self, velocity, theta):
        return self.moveAngleDeg(velocity, theta)

    def mar(self, velocity, theta):
        return self.moveAngleRad(velocity, theta)
2ea42ed75506284aeaca6832127c5ac1f95139ab
c23b4c6253ca5a0d42822dd0d28ffa752c11ebf5
/exercises/c3ec2a04-cbca-459a-951f-f17cc34310c7/skeletons/8fd3c5ac-35d2-40cd-9d21-77a4a6671d7c/skeleton4.py3
e2a36f52de4511c924c13798bc533064cd0477c9
[]
no_license
josepaiva94/e57d8867-6234-41a6-b239-2cd978ad1e70
803e2eb1e2db23c64409bc72ff00c4463875a82f
aa270941dd8cf7b2e1ec8ac89445b1ab3a47f89d
refs/heads/master
2023-01-07T10:49:56.871378
2020-11-16T11:28:14
2020-11-16T11:28:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
134
py3
if largest != root_index:
    nums[root_index], nums[largest] = nums[largest], nums[root_index]
    heapify(nums, heap_size, largest)
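The skeleton above is only the final swap-and-recurse step of a max-heap sift-down. For context, one common reconstruction of the surrounding function; this is an editorial assumption, not the exercise's official solution:

# Hypothetical enclosing function: standard max-heap sift-down,
# assuming 0-based child indexing.
def heapify(nums, heap_size, root_index):
    largest = root_index
    left = 2 * root_index + 1
    right = 2 * root_index + 2
    if left < heap_size and nums[left] > nums[largest]:
        largest = left
    if right < heap_size and nums[right] > nums[largest]:
        largest = right
    if largest != root_index:
        nums[root_index], nums[largest] = nums[largest], nums[root_index]
        heapify(nums, heap_size, largest)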
b8b5d53aedd215e4c38db5455b764f4b73bb83b5
3420aba3622faf2d4aede984c656f68ad24a1f3c
/backend/personal_care_22730/settings.py
230da7088fe365290e5935afd842c015a2ea9d7d
[]
no_license
crowdbotics-apps/personal-care-22730
bb81af122e64cb58f6d52df31df328b6dfa4b25d
066d2cd5e890057df054ea7c5b3b5f061e872371
refs/heads/master
2023-01-11T06:30:05.971088
2020-11-18T16:23:30
2020-11-18T16:23:30
313,990,783
0
0
null
null
null
null
UTF-8
Python
false
false
7,048
py
""" Django settings for personal_care_22730 project. Generated by 'django-admin startproject' using Django 2.2.2. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os import environ import logging env = environ.Env() # SECURITY WARNING: don't run with debug turned on in production! DEBUG = env.bool("DEBUG", default=False) # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = env.str("SECRET_KEY") ALLOWED_HOSTS = env.list("HOST", default=["*"]) SITE_ID = 1 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False) # Application definition INSTALLED_APPS = [ "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.messages", "django.contrib.staticfiles", "django.contrib.sites", "healthcare", ] LOCAL_APPS = [ "home", "users.apps.UsersConfig", ] THIRD_PARTY_APPS = [ "rest_framework", "rest_framework.authtoken", "rest_auth", "rest_auth.registration", "bootstrap4", "allauth", "allauth.account", "allauth.socialaccount", "allauth.socialaccount.providers.google", "django_extensions", "drf_yasg", "storages", # start fcm_django push notifications "fcm_django", # end fcm_django push notifications ] INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ] ROOT_URLCONF = "personal_care_22730.urls" TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, }, ] WSGI_APPLICATION = "personal_care_22730.wsgi.application" # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": os.path.join(BASE_DIR, "db.sqlite3"), } } if env.str("DATABASE_URL", default=None): DATABASES = {"default": env.db()} # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", }, { "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", }, { "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", }, { "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = "en-us" TIME_ZONE = "UTC" USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # 
https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = "/static/" MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"] AUTHENTICATION_BACKENDS = ( "django.contrib.auth.backends.ModelBackend", "allauth.account.auth_backends.AuthenticationBackend", ) STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles") STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")] STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage" # allauth / users ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_AUTHENTICATION_METHOD = "email" ACCOUNT_USERNAME_REQUIRED = False ACCOUNT_EMAIL_VERIFICATION = "optional" ACCOUNT_CONFIRM_EMAIL_ON_GET = True ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True ACCOUNT_UNIQUE_EMAIL = True LOGIN_REDIRECT_URL = "users:redirect" ACCOUNT_ADAPTER = "users.adapters.AccountAdapter" SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter" ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True) SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True) REST_AUTH_SERIALIZERS = { # Replace password reset serializer to fix 500 error "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer", } REST_AUTH_REGISTER_SERIALIZERS = { # Use custom serializer that has no username and matches web signup "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer", } # Custom user model AUTH_USER_MODEL = "users.User" EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net") EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "") EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "") EMAIL_PORT = 587 EMAIL_USE_TLS = True # AWS S3 config AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "") AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "") AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "") AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "") USE_S3 = ( AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_STORAGE_BUCKET_NAME and AWS_STORAGE_REGION ) if USE_S3: AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "") AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"} AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read") AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media") AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True) DEFAULT_FILE_STORAGE = env.str( "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage" ) MEDIA_URL = "/mediafiles/" MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles") # start fcm_django push notifications FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")} # end fcm_django push notifications # Swagger settings for api docs SWAGGER_SETTINGS = { "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info", } if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD): # output email to console instead of sending if not DEBUG: logging.warning( "You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails." ) EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
56c15e78ef411bada79abd374bd7d67e36ff9929
234c7fb0bdabdd696c8e4c6a449ac2c8e3f14ad5
/build/PureCloudPlatformClientV2/models/workday_values_trend.py
2cd0430e7aa02ba9e1bd12ac92f707c225c96002
[ "Apache-2.0", "MIT" ]
permissive
humano7/platform-client-sdk-python
2a942c43cc2d69e8cb0c4113d998e6e0664fdedb
dd5b693b1fc90c9dcb36885d7227f11221db5980
refs/heads/master
2023-04-12T05:05:53.932393
2021-04-22T03:41:22
2021-04-22T03:41:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,713
py
# coding: utf-8

"""
Copyright 2016 SmartBear Software

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Ref: https://github.com/swagger-api/swagger-codegen
"""

from pprint import pformat
from six import iteritems
import re
import json

from ..utils import sanitize_for_serialization


class WorkdayValuesTrend(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        """
        WorkdayValuesTrend - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'date_start_workday': 'date',
            'date_end_workday': 'date',
            'division': 'Division',
            'user': 'UserReference',
            'timezone': 'str',
            'results': 'list[WorkdayValuesMetricItem]'
        }

        self.attribute_map = {
            'date_start_workday': 'dateStartWorkday',
            'date_end_workday': 'dateEndWorkday',
            'division': 'division',
            'user': 'user',
            'timezone': 'timezone',
            'results': 'results'
        }

        self._date_start_workday = None
        self._date_end_workday = None
        self._division = None
        self._user = None
        self._timezone = None
        self._results = None

    @property
    def date_start_workday(self):
        """
        Gets the date_start_workday of this WorkdayValuesTrend.
        The start workday for the query range for the metric value trend. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd

        :return: The date_start_workday of this WorkdayValuesTrend.
        :rtype: date
        """
        return self._date_start_workday

    @date_start_workday.setter
    def date_start_workday(self, date_start_workday):
        """
        Sets the date_start_workday of this WorkdayValuesTrend.
        The start workday for the query range for the metric value trend. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd

        :param date_start_workday: The date_start_workday of this WorkdayValuesTrend.
        :type: date
        """
        self._date_start_workday = date_start_workday

    @property
    def date_end_workday(self):
        """
        Gets the date_end_workday of this WorkdayValuesTrend.
        The end workday for the query range for the metric value trend. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd

        :return: The date_end_workday of this WorkdayValuesTrend.
        :rtype: date
        """
        return self._date_end_workday

    @date_end_workday.setter
    def date_end_workday(self, date_end_workday):
        """
        Sets the date_end_workday of this WorkdayValuesTrend.
        The end workday for the query range for the metric value trend. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd

        :param date_end_workday: The date_end_workday of this WorkdayValuesTrend.
        :type: date
        """
        self._date_end_workday = date_end_workday

    @property
    def division(self):
        """
        Gets the division of this WorkdayValuesTrend.
        The targeted division for the query

        :return: The division of this WorkdayValuesTrend.
        :rtype: Division
        """
        return self._division

    @division.setter
    def division(self, division):
        """
        Sets the division of this WorkdayValuesTrend.
        The targeted division for the query

        :param division: The division of this WorkdayValuesTrend.
        :type: Division
        """
        self._division = division

    @property
    def user(self):
        """
        Gets the user of this WorkdayValuesTrend.
        The targeted user for the query

        :return: The user of this WorkdayValuesTrend.
        :rtype: UserReference
        """
        return self._user

    @user.setter
    def user(self, user):
        """
        Sets the user of this WorkdayValuesTrend.
        The targeted user for the query

        :param user: The user of this WorkdayValuesTrend.
        :type: UserReference
        """
        self._user = user

    @property
    def timezone(self):
        """
        Gets the timezone of this WorkdayValuesTrend.
        The time zone used for aggregating metric values

        :return: The timezone of this WorkdayValuesTrend.
        :rtype: str
        """
        return self._timezone

    @timezone.setter
    def timezone(self, timezone):
        """
        Sets the timezone of this WorkdayValuesTrend.
        The time zone used for aggregating metric values

        :param timezone: The timezone of this WorkdayValuesTrend.
        :type: str
        """
        self._timezone = timezone

    @property
    def results(self):
        """
        Gets the results of this WorkdayValuesTrend.
        The metric value trends

        :return: The results of this WorkdayValuesTrend.
        :rtype: list[WorkdayValuesMetricItem]
        """
        return self._results

    @results.setter
    def results(self, results):
        """
        Sets the results of this WorkdayValuesTrend.
        The metric value trends

        :param results: The results of this WorkdayValuesTrend.
        :type: list[WorkdayValuesMetricItem]
        """
        self._results = results

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
491a1f11b35ab27ff0eb2c2ce7bb95b422862b4a
ed7cd7760c708720f5a847a02b0c3a50cca0175e
/docs/conf.py
c6db3e446649d27013be9c86061f2f9677830789
[ "MIT" ]
permissive
jcapriot/aurora
bf98b1236e7dc43e0189df71725f7f862d271984
08d5ccc671054a2b646a4effb412a2ed48314646
refs/heads/main
2023-09-05T00:07:16.984109
2021-10-27T02:49:41
2021-10-27T02:49:41
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,585
py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

import aurora
from sphinx_gallery.sorting import FileNameSortKey

# -- Project information -----------------------------------------------------

project = 'aurora'
copyright = '2021, Karl Kappler, Jared Peacock, Lindsey Heagy, Douglas Oldenburg'
author = 'Karl Kappler, Jared Peacock, Lindsey Heagy, Douglas Oldenburg'

# The full version, including alpha/beta/rc tags
release = '0.0.1'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "matplotlib.sphinxext.plot_directive",
    "numpydoc",
    # "nbsphinx",
    "sphinx_gallery.gen_gallery"
]

# Autosummary pages will be generated by sphinx-autogen instead of sphinx-build
autosummary_generate = True

numpydoc_class_members_toctree = False

# API doc options
apidoc_module_dir = "../aurora"
apidoc_output_dir = "api/generated"
apidoc_toc_file = False
apidoc_excluded_paths = []
apidoc_separate_modules = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
try:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    pass
except Exception:
    html_theme = "default"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Intersphinx
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "numpy": ("https://docs.scipy.org/doc/numpy/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
    "matplotlib": ("https://matplotlib.org/", None),
}

# Sphinx Gallery
sphinx_gallery_conf = {
    # path to your examples scripts
    "examples_dirs": [
        "../examples",
    ],
    "gallery_dirs": [
        "examples",
    ],
    "within_subsection_order": FileNameSortKey,
    "filename_pattern": "\.py",
    "backreferences_dir": "api/generated/backreferences",
    "doc_module": "aurora",
    # 'reference_url': {'discretize': None},
}
13f0735af7afa71669e0b00ec47e9d7d07d8bce0
d5214b1331c9dae59d95ba5b3aa3e9f449ad6695
/qPloneDropDownMenu/branches/0.2/skins/qPloneDropDownMenu/qpdm_reorder.py
e0bfe0d169c75bd9dae28edd63c26790aeb59ec2
[]
no_license
kroman0/products
1661ee25a224c4b5f172f98110944f56136c77cf
f359bb64db22f468db5d1e411638790e94d535a2
refs/heads/master
2021-01-10T07:58:04.579234
2014-06-11T12:05:56
2014-06-11T12:05:56
52,677,831
0
0
null
null
null
null
UTF-8
Python
false
false
473
py
## Script (Python) "qpdm_reorder"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters= submenu_path, idxs
##title=
##
from Products.CMFCore.utils import getToolByName

menu_tool = getToolByName(context, 'portal_dropdownmenu')
menuitem = menu_tool.manage_reorderItems(idxs, submenu_path)

return context.getSubmenu(submenu=menu_tool.getSubMenuByPath(submenu_path), submenu_path=submenu_path)
[ "mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946" ]
mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946
432aae4837c6d251b61eb69326fd327cebce4c6c
a63d907ad63ba6705420a6fb2788196d1bd3763c
/src/api/resourcecenter/serializers/processing_metrics_serializers.py
bb72d5540d96efd33b60750a04d702611cbf0b03
[ "MIT" ]
permissive
Tencent/bk-base
a38461072811667dc2880a13a5232004fe771a4b
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
refs/heads/master
2022-07-30T04:24:53.370661
2022-04-02T10:30:55
2022-04-02T10:30:55
381,257,882
101
51
NOASSERTION
2022-04-02T10:30:56
2021-06-29T06:10:01
Python
UTF-8
Python
false
false
2,364
py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.

Copyright (C) 2021 THL A29 Limited, a Tencent company.  All rights reserved.

BK-BASE 蓝鲸基础平台 is licensed under the MIT License.

License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import datetime

from django.utils.translation import ugettext as _
from rest_framework import serializers

from common.exceptions import ValidationError


class ProcessingMetricSummarySerializer(serializers.Serializer):
    start_time = serializers.CharField(label=_("开始日期"))  # "start date"
    end_time = serializers.CharField(label=_("结束日期"))  # "end date"
    geog_area_code = serializers.CharField(required=False, label=_("地区"))  # "region"

    def validate_start_time(self, start_time):
        try:
            datetime.datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
        except ValueError:
            # message: "start date must be in the format YYYY-MM-DD HH:mm:SS"
            raise ValidationError(_("开始日期,格式为YYYY-MM-DD HH:mm:SS"))
        return start_time

    def validate_end_time(self, end_time):
        try:
            datetime.datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")
        except ValueError:
            # message: "end date must be in the format YYYY-MM-DD HH:mm:SS"
            raise ValidationError(_("结束日期,格式为YYYY-MM-DD HH:mm:SS"))
        return end_time
ff96ced9ce7021a3e0768e0e4493dcaaee8df6fd
a6086dcd794ee1419081761e473433081249059f
/app/api/errors.py
9e92b5acf600ead372909b7faad5a3d73fe777ea
[]
no_license
billy0402/flask-stock-api
f1d6f51d7d67300eccc2d7621eacc41f3a8ec609
2d656c80b2a062f8dd4f7f8466ed3060f7d56477
refs/heads/master
2023-07-18T15:40:53.869479
2021-09-08T18:57:47
2021-09-08T18:57:47
402,569,924
1
0
null
null
null
null
UTF-8
Python
false
false
632
py
from flask import jsonify

from . import api
from ..exceptions import ValidationError


def bad_request(message):
    response = jsonify({'error': 'bad request', 'message': message})
    response.status_code = 400
    return response


def unauthorized(message):
    response = jsonify({'error': 'unauthorized', 'message': message})
    response.status_code = 401
    return response


def forbidden(message):
    response = jsonify({'error': 'forbidden', 'message': message})
    response.status_code = 403
    return response


@api.errorhandler(ValidationError)
def validation_error(error):
    return bad_request(error.args[0])
d21c0896c06e1415355d55f1c6aa4eda00358cbc
46559fa48bb8ae722149b600ecd5e05e558553ac
/RumourEval2019Models/Bert-MFajcik/data_preprocessing/text_preprocessing.py
185113f8777ca3d00c738e17f3b504dde6cda8ea
[ "MIT" ]
permissive
isspek/veracity-detection
f84eeba6aceb8b2f3f753c5e856bb46d9581c0c5
9368309722bead209e49e52c206758e3d173092a
refs/heads/master
2022-07-15T10:25:10.327352
2019-11-14T13:24:55
2019-11-14T13:24:55
214,429,773
0
0
MIT
2022-06-21T23:08:54
2019-10-11T12:23:39
Python
UTF-8
Python
false
false
7,549
py
import re
import string
import warnings

import preprocessor as twitter_preprocessor
import spacy
# See spacy tag_map.py for tag explanation
from nltk.corpus import stopwords
from spacy.symbols import PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, VERB, NOUN, PROPN, PART, PRON, ORTH

from utils import DotDict

warnings.filterwarnings("ignore", category=UserWarning, module='bs4')

nlp = None
punctuation = list(string.punctuation) + ["``"]
stopWords = set(stopwords.words('english'))

validPOS = [PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, VERB, NOUN, PROPN, PART, PRON]
POS_dict = {x: i + 2 for i, x in enumerate(validPOS)}
POS_dict['UNK'] = 0
POS_dict['EOS'] = 1

validNER = ["UNK",
            "PERSON",       # People, including fictional.
            "NORP",         # Nationalities or religious or political groups.
            "FAC",          # Buildings, airports, highways, bridges, etc.
            "ORG",          # Companies, agencies, institutions, etc.
            "GPE",          # Countries, cities, states.
            "LOC",          # Non-GPE locations, mountain ranges, bodies of water.
            "PRODUCT",      # Objects, vehicles, foods, etc. (Not services.)
            "EVENT",        # Named hurricanes, battles, wars, sports events, etc.
            "WORK_OF_ART",  # Titles of books, songs, etc.
            "LAW",          # Named documents made into laws.
            "LANGUAGE",     # Any named language.
            "DATE",         # Absolute or relative dates or periods.
            "TIME",         # Times smaller than a day.
            "PERCENT",      # Percentage, including "%".
            "MONEY",        # Monetary values, including unit.
            "QUANTITY",     # Measurements, as of weight or distance.
            "ORDINAL",      # "first", "second", etc.
            "CARDINAL",     # Numerals that do not fall under another type.
            ]

validDEPS = ['UNK', 'acl', 'acomp', 'advcl', 'advmod', 'agent', 'amod', 'appos', 'attr', 'aux',
             'auxpass', 'case', 'cc', 'ccomp', 'complm', 'compound', 'conj', 'cop', 'csubj',
             'csubjpass', 'dative', 'dep', 'det', 'dobj', 'expl', 'hmod', 'hyph', 'infmod',
             'intj', 'iobj', 'mark', 'meta', 'neg', 'nmod', 'nn', 'npadvmod', 'nsubj',
             'nsubjpass', 'num', 'number', 'nummod', 'obj', 'obl', 'oprd', 'parataxis',
             'partmod', 'pcomp', 'pobj', 'poss', 'possessive', 'preconj', 'predet', 'prep',
             'prt', 'punct', 'quantmod', 'rcmod', 'relcl', 'root', 'xcomp']


def preprocess_text(text: str, opts, nlpengine=None, lang='en', special_tags=["<pad>", "<eos>"],
                    use_tw_preprocessor=True):
    if use_tw_preprocessor:
        ## ! There is a bug in original package for twitter preprocessing
        # Sometimes the regexp for link preprocessing freezes
        # So we preprocess links separately
        text = re.sub(
            r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?",
            "$URL$", text.strip())
        twitter_preprocessor.set_options('mentions')
        text = twitter_preprocessor.tokenize(text)
        # processed_chunk = twitter_preprocessor.clean(text)

    if nlpengine is None:
        global nlp
        if nlp is None:
            nlp = spacy.load(lang)
            nlp.add_pipe(nlp.create_pipe('sentencizer'))
            for x in ['URL', 'MENTION', 'HASHTAG', 'RESERVED', 'EMOJI', 'SMILEY', 'NUMBER', ]:
                nlp.tokenizer.add_special_case(f'${x}$', [{ORTH: f'${x}$'}])
        nlpengine = nlp

    BLvec = []
    POSvec = []
    DEPvec = []
    NERvec = []

    processed_chunk = ""
    doc = nlpengine(text)
    doclen = 0
    for sentence in doc.sents:
        for w in sentence:
            # Some phrases are automatically tokenized by Spacy
            # i.e. New York, in that case we want New_York in our dictionary
            word = "_".join(w.text.split())
            if word.isspace() or word == "":
                continue
            if opts.remove_stop_words and word.lower() in stopWords:
                continue
            if opts.remove_puncuation and word in punctuation:
                continue

            # Spacy lemmatized I,He/She/It into artificial
            # -PRON- lemma, which is unwanted
            if opts.lemmatize_words:
                output = w.lemma_ if w.lemma_ != '-PRON-' else w.lower_
            else:
                output = word
            if opts.to_lowercase:
                output = output.lower()

            if opts.replace_nums and output.replace('.', '', 1).isdigit():
                output = opts.num_replacement
            output = output.replace("n't", "not")
            doclen += 1
            processed_chunk += "%s " % (output)

            # Sometimes, when the word contains punctuation and we split it manually
            # the output can contain multiple tokens
            # In such case, just copy the features..., it happens rarely
            if opts.returnbiglettervector:
                BLvec.append(int(w.text[0].isupper()))
            if opts.returnposvector:
                POSvec.append(POS_dict.get(w.pos, POS_dict['UNK']))
            if opts.returnDEPvector:
                try:
                    DEPvec.append(validDEPS.index(w.dep_.lower()))
                except ValueError:
                    DEPvec.append(validDEPS.index('UNK'))
            if opts.returnNERvector:
                try:
                    NERvec.append(validNER.index(w.ent_type_))
                except ValueError:
                    NERvec.append(validNER.index('UNK'))

        if opts.add_eos:
            doclen += 1
            processed_chunk += opts.eos + "\n"
            if opts.returnbiglettervector:
                BLvec.append(0)
            if opts.returnposvector:
                POSvec.append(POS_dict['EOS'])
            if opts.returnDEPvector:
                DEPvec.append(0)
            if opts.returnNERvector:
                NERvec.append(0)
        else:
            processed_chunk += "\n"

    processed_chunk = processed_chunk.strip()
    assert len(processed_chunk.split()) == len(BLvec) == len(POSvec) == len(DEPvec) == len(NERvec)
    return processed_chunk, BLvec, POSvec, DEPvec, NERvec


def initopts():
    o = DotDict()
    o.stopwords_file = ""
    o.remove_puncuation = False
    o.remove_stop_words = False
    o.lemmatize_words = False
    o.num_replacement = "[NUM]"
    o.to_lowercase = False
    o.replace_nums = False  # Nums are important, since rumour may be lying about count
    o.eos = "[EOS]"
    o.add_eos = True
    o.returnNERvector = True
    o.returnDEPvector = True
    o.returnbiglettervector = True
    o.returnposvector = True
    return o


if __name__ == "__main__":
    print(preprocess_text(
        "Appalled by the attack on Charlie Hebdo in Paris, 10 - probably journalists - now confirmed dead. An attack on free speech everywhere.",
        initopts()))
eedc1a1a7b87294894b34aefd03488bb442339be
33e5e4b883671f7f40a48e6e0a4b544b3f8f839a
/imageflow/apps.py
2b8872cb5e0adfd69a6677056fd89db00b564baa
[ "MIT" ]
permissive
typpo/astrokit
ad7ee83664e3d920733d7e008aec4801c7aa84f2
59cea2e06c027e83dfa70defb4053820c79ccced
refs/heads/master
2023-04-12T15:44:11.669710
2022-06-21T21:21:04
2022-06-21T21:21:04
47,933,931
9
7
MIT
2023-03-31T14:28:40
2015-12-13T19:52:01
Python
UTF-8
Python
false
false
134
py
from __future__ import unicode_literals

from django.apps import AppConfig


class ImageflowConfig(AppConfig):
    name = 'imageflow'
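For context, an AppConfig like this is registered from the project settings; a minimal sketch (the settings module itself is not part of this record):

# settings.py sketch; a real project lists more apps
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'imageflow.apps.ImageflowConfig',  # registers the app via its AppConfig
]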
9b3ef03ef6d8de217adbc634e63f038ae42d5d52
0a3bf0a6f10eb143c9291090125946538ee73279
/summarize/sumy/summarizers/edmundson_location.py
406597f5a31d71b8b516c58c710328b273d06797
[ "Apache-2.0" ]
permissive
AIPHES/live-blog-summarization
19ec1c01b7e254f74b2de153ac3972780daa7506
a5f899ea07a098e1e0b3ab92cd3d430776e6412a
refs/heads/master
2022-11-24T09:39:25.750313
2019-02-12T13:53:12
2019-02-12T13:53:12
166,268,167
2
1
Apache-2.0
2022-11-02T20:47:14
2019-01-17T17:34:10
Python
UTF-8
Python
false
false
2,516
py
# -*- coding: utf8 -*- from __future__ import absolute_import from __future__ import division, print_function, unicode_literals from itertools import chain from operator import attrgetter from .._compat import ffilter from ._summarizer import AbstractSummarizer class EdmundsonLocationMethod(AbstractSummarizer): def __init__(self, stemmer, null_words): super(EdmundsonLocationMethod, self).__init__(stemmer) self._null_words = null_words def __call__(self, document, sentences_count, w_h, w_p1, w_p2, w_s1, w_s2): significant_words = self._compute_significant_words(document) ratings = self._rate_sentences(document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2) return self._get_best_sentences(document.sentences, sentences_count, ratings) def _compute_significant_words(self, document): headings = document.headings significant_words = chain(*map(attrgetter("words"), headings)) significant_words = map(self.stem_word, significant_words) significant_words = ffilter(self._is_null_word, significant_words) return frozenset(significant_words) def _is_null_word(self, word): return word in self._null_words def _rate_sentences(self, document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2): rated_sentences = {} paragraphs = document.paragraphs for paragraph_order, paragraph in enumerate(paragraphs): sentences = paragraph.sentences for sentence_order, sentence in enumerate(sentences): rating = self._rate_sentence(sentence, significant_words) rating *= w_h if paragraph_order == 0: rating += w_p1 elif paragraph_order == len(paragraphs) - 1: rating += w_p2 if sentence_order == 0: rating += w_s1 elif sentence_order == len(sentences) - 1: rating += w_s2 rated_sentences[sentence] = rating return rated_sentences def _rate_sentence(self, sentence, significant_words): words = map(self.stem_word, sentence.words) return sum(w in significant_words for w in words) def rate_sentences(self, document, w_h=1, w_p1=1, w_p2=1, w_s1=1, w_s2=1): significant_words = self._compute_significant_words(document) return self._rate_sentences(document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2)
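To make the positional bonuses in _rate_sentences concrete, here is a standalone illustration of the same rule on plain integers; it is not the sumy API:

# illustration only: the positional bonus scheme from _rate_sentences
def positional_bonus(paragraph_order, n_paragraphs, sentence_order, n_sentences,
                     w_p1=1.0, w_p2=1.0, w_s1=1.0, w_s2=1.0):
    bonus = 0.0
    if paragraph_order == 0:
        bonus += w_p1                      # first paragraph of the document
    elif paragraph_order == n_paragraphs - 1:
        bonus += w_p2                      # last paragraph of the document
    if sentence_order == 0:
        bonus += w_s1                      # first sentence of its paragraph
    elif sentence_order == n_sentences - 1:
        bonus += w_s2                      # last sentence of its paragraph
    return bonus

print(positional_bonus(0, 3, 0, 4))  # 2.0: opening sentence of the opening paragraph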
3941489ec2a7e0de2b1adcec8caab3fafca2f3a0
4b4df51041551c9a855468ddf1d5004a988f59a2
/leetcode_python/Array/corporate-flight-bookings.py
d6486593ea2dc4f37b79869a1f72ef71fc6dc067
[]
no_license
yennanliu/CS_basics
99b7ad3ef6817f04881d6a1993ec634f81525596
035ef08434fa1ca781a6fb2f9eed3538b7d20c02
refs/heads/master
2023-09-03T13:42:26.611712
2023-09-03T12:46:08
2023-09-03T12:46:08
66,194,791
64
40
null
2022-08-20T09:44:48
2016-08-21T11:11:35
Python
UTF-8
Python
false
false
5,073
py
""" 1109. Corporate Flight Bookings Medium There are n flights that are labeled from 1 to n. You are given an array of flight bookings bookings, where bookings[i] = [firsti, lasti, seatsi] represents a booking for flights firsti through lasti (inclusive) with seatsi seats reserved for each flight in the range. Return an array answer of length n, where answer[i] is the total number of seats reserved for flight i. Example 1: Input: bookings = [[1,2,10],[2,3,20],[2,5,25]], n = 5 Output: [10,55,45,25,25] Explanation: Flight labels: 1 2 3 4 5 Booking 1 reserved: 10 10 Booking 2 reserved: 20 20 Booking 3 reserved: 25 25 25 25 Total seats: 10 55 45 25 25 Hence, answer = [10,55,45,25,25] Example 2: Input: bookings = [[1,2,10],[2,2,15]], n = 2 Output: [10,25] Explanation: Flight labels: 1 2 Booking 1 reserved: 10 10 Booking 2 reserved: 15 Total seats: 10 25 Hence, answer = [10,25] Constraints: 1 <= n <= 2 * 104 1 <= bookings.length <= 2 * 104 bookings[i].length == 3 1 <= firsti <= lasti <= n 1 <= seatsi <= 104 """ # V0 # V1 # IDEA : ARRAY + prefix sum # https://leetcode.com/problems/corporate-flight-bookings/discuss/328856/JavaC%2B%2BPython-Sweep-Line # IDEA : # Set the change of seats for each day. # If booking = [i, j, k], # it needs k more seat on ith day, # and we don't need these seats on j+1th day. # We accumulate these changes then we have the result that we want. # Complexity # Time O(booking + N) for one pass on bookings # Space O(N) for the result class Solution: def corpFlightBookings(self, bookings, n): res = [0] * (n + 1) for i, j, k in bookings: res[i - 1] += k res[j] -= k for i in range(1, n): res[i] += res[i - 1] return res[:-1] # V1' # IDEA : ARRAY + prefix sum # https://leetcode.com/problems/corporate-flight-bookings/discuss/328949/Simple-Python-solution class Solution: def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]: answer = n * [0] lst = [] for i, j, num in bookings: lst.append((i - 1, num)) lst.append((j, -num)) lst.sort() curr_num = 0 prev_i = 0 for i, num in lst: for j in range(prev_i, i): answer[j] += curr_num prev_i = i curr_num += num return answer # V1'' # IDEA : ARRAY # https://leetcode.com/problems/corporate-flight-bookings/discuss/328893/Short-python-solution # IDEA : Simply use two arrays to keep track of how many bookings are added for every flight. 
class Solution: def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]: opens = [0]*n closes = [0]*n for e in bookings: opens[e[0]-1] += e[2] closes[e[1]-1] += e[2] ret, tmp = [0]*n, 0 for i in range(n): tmp += opens[i] ret[i] = tmp tmp -= closes[i] return ret # V1''' # https://leetcode.com/problems/corporate-flight-bookings/discuss/328986/Python-linear-solution class Solution: def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]: res = [0] * (n + 2) for booking in bookings: start, end, seats = booking res[start] += seats res[end + 1] -= seats for i in range(1, len(res)): res[i] += res[i - 1] # don't keep first because bookings are 1-based # don't keep last because it's out of range return res[1:-1] # V1'''' # https://leetcode.com/problems/corporate-flight-bookings/discuss/328863/Python-concise-sum class Solution: def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]: res = [0] * n i = cur = 0 for j, val in sorted([[i - 1, k] for i, j, k in bookings] + [[j, -k] for i, j, k in bookings]): while i < j: res[i] = cur i += 1 cur += val return res # V1'''''' # https://zxi.mytechroad.com/blog/math/leetcode-1109-corporate-flight-bookings/ # C++ # class Solution { # public: # vector<int> corpFlightBookings(vector<vector<int>>& bookings, int n) { # vector<int> ans(n + 1); # for (const auto& b : bookings) { # ans[b[0] - 1] += b[2]; # ans[b[1]] -= b[2]; # } # for (int i = 1; i < n; ++i) # ans[i] += ans[i - 1]; # ans.pop_back(); # return ans; # } # }; # V1'''''''' # https://blog.51cto.com/u_15344287/3646723 class Solution: def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]: lst = [0] * (n + 1) for j, k, l in bookings: lst[j - 1] += l lst[k] -= l lst.pop() ans = [] now = 0 for i in range(len(lst)): now += lst[i] ans.append(now) return ans # V2
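A standalone sanity check of the difference-array idea shared by the solutions above:

# prefix-sum/difference-array technique on the first example from the problem
def corp_flight_bookings(bookings, n):
    res = [0] * (n + 1)
    for first, last, seats in bookings:
        res[first - 1] += seats   # seats appear starting at flight `first`
        res[last] -= seats        # and disappear after flight `last`
    for i in range(1, n):
        res[i] += res[i - 1]      # running sum turns deltas into totals
    return res[:-1]

print(corp_flight_bookings([[1, 2, 10], [2, 3, 20], [2, 5, 25]], 5))  # [10, 55, 45, 25, 25]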
8da121d649ea828a915d2f8fee0f8d2f41569f13
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
/data/external/repositories_2to3/120243/tradeshift-text-classification-master/src/online-model/tk7_solution.py
ffa812f783556c5f81ae943cd1fa4a0497105321
[ "MIT" ]
permissive
Keesiu/meta-kaggle
77d134620ebce530d183467202cf45639d9c6ff2
87de739aba2399fd31072ee81b391f9b7a63f540
refs/heads/master
2020-03-28T00:23:10.584151
2018-12-20T19:09:50
2018-12-20T19:09:50
147,406,338
0
1
null
null
null
null
UTF-8
Python
false
false
6,176
py
'''
           DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
                   Version 2, December 2004

Copyright (C) 2004 Sam Hocevar <[email protected]>

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.

           DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
  TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

 0. You just DO WHAT THE FUCK YOU WANT TO.
'''

from datetime import datetime
from math import log, exp, sqrt

# TL;DR: the main learning process starts at the "training and testing" section below

# parameters #################################################################
import sys

data_dir = sys.argv[1]
sub_dir = sys.argv[2]
train = data_dir + 'train.csv'        # path to training file
label = data_dir + 'trainLabels.csv'  # path to label file of training data
test = data_dir + 'test.csv'          # path to testing file

D = 2 ** 23  # number of weights used for each model, we have 32 of them
alpha = .1   # learning rate for sgd optimization


# function, generator definitions ############################################

# A. x, y generator
# INPUT:
#     path: path to train.csv or test.csv
#     label_path: (optional) path to trainLabels.csv
# YIELDS:
#     ID: id of the instance (can also act as instance count)
#     x: a list of indices whose value is 1
#     y: (if label_path is present) label value of y1 to y33
def data(path, label_path=None):
    for t, line in enumerate(open(path)):
        # initialize our generator
        if t == 0:
            # create a static x,
            # so we don't have to construct a new x for every instance
            x = [0] * (146 + 13 * 14 // 2 + 1)  # // keeps the size an int on Python 3
            if label_path:
                label = open(label_path)
                label.readline()  # we don't need the headers
            continue
        # parse x
        for m, feat in enumerate(line.rstrip().split(',')):
            if m == 0:
                ID = int(feat)
            else:
                # one-hot encode everything with hash trick
                # categorical: one-hotted
                # boolean: ONE-HOTTED
                # numerical: ONE-HOTTED!
                # note, the built-in hash(), although fast, is not stable,
                # i.e., the same value won't always have the same hash
                # on different machines
                x[m] = abs(hash(str(m) + '_' + feat)) % D
        row = line.rstrip().split(',')
        hash_cols = [64, 65, 61, 62, 91, 92, 142, 3, 4, 61, 34, 91, 94, 95]
        t = 146  # reuse t as the index of the pairwise interaction features
        for i in range(14):
            for j in range(i + 1, 14):
                t += 1
                x[t] = abs(hash(str(i) + '_' + str(j) + '_' + row[hash_cols[i]] + "_x_" + row[hash_cols[j]])) % D

        # parse y, if provided
        if label_path:
            # use float() to prevent future type casting, [1:] to ignore id
            y = [float(y) for y in label.readline().split(',')[1:]]
        yield (ID, x, y) if label_path else (ID, x)


# B. Bounded logloss
# INPUT:
#     p: our prediction
#     y: real answer
# OUTPUT
#     bounded logarithmic loss of p given y
def logloss(p, y):
    p = max(min(p, 1. - 10e-15), 10e-15)
    return -log(p) if y == 1. else -log(1. - p)


# C. Get probability estimation on x
# INPUT:
#     x: features
#     w: weights
# OUTPUT:
#     probability of p(y = 1 | x; w)
def predict(x, w):
    wTx = 0.
    for i in x:  # do wTx
        wTx += w[i] * 1.  # w[i] * x[i], but if i in x we got x[i] = 1.
    return 1. / (1. + exp(-max(min(wTx, 20.), -20.)))  # bounded sigmoid


# D. Update given model
# INPUT:
#     alpha: learning rate
#     w: weights
#     n: sum of previous absolute gradients for a given feature,
#        this is used for adaptive learning rate
#     x: feature, a list of indices
#     p: prediction of our model
#     y: answer
# MODIFIES:
#     w: weights
#     n: sum of past absolute gradients
def update(alpha, w, n, x, p, y):
    for i in x:
        # alpha / sqrt(n) is the adaptive learning rate
        # (p - y) * x[i] is the current gradient
        # note that in our case, if i in x then x[i] = 1.
        n[i] += abs(p - y)
        w[i] -= (p - y) * 1. * alpha / sqrt(n[i])


# training and testing #######################################################
start = datetime.now()

# a list for range(0, 33) - 13, no need to learn y14 since it is always 0
K = [k for k in range(33) if k != 13]

# initialize our model, all 32 of them, again ignoring y14
w = [[0.] * D if k != 13 else None for k in range(33)]
n = [[0.] * D if k != 13 else None for k in range(33)]

loss = 0.
loss_y14 = log(1. - 10**-15)

# first pass over the training data
for ID, x, y in data(train, label):
    # get predictions and train on all labels
    for k in K:
        p = predict(x, w[k])
        update(alpha, w[k], n[k], x, p, y[k])
        loss += logloss(p, y[k])  # for progressive validation
    loss += loss_y14  # the loss of y14, logloss is never zero

    # print out progress, so that we know everything is working
    if ID % 100000 == 0:
        print(('%s\tencountered: %d\tcurrent logloss: %f' % (
            datetime.now(), ID, (loss/33.)/ID)))

# second, identical pass over the training data (a second epoch)
for ID, x, y in data(train, label):
    # get predictions and train on all labels
    for k in K:
        p = predict(x, w[k])
        update(alpha, w[k], n[k], x, p, y[k])
        loss += logloss(p, y[k])  # for progressive validation
    loss += loss_y14  # the loss of y14, logloss is never zero

    # print out progress, so that we know everything is working
    if ID % 100000 == 0:
        print(('%s\tencountered: %d\tcurrent logloss: %f' % (
            datetime.now(), ID, (loss/33.)/ID)))

with open(sub_dir + './submissiontk7.csv', 'w') as outfile:
    outfile.write('id_label,pred\n')
    for ID, x in data(test):
        for k in K:
            p = predict(x, w[k])
            outfile.write('%s_y%d,%s\n' % (ID, k+1, str(p)))
            if k == 12:
                outfile.write('%s_y14,0.0\n' % ID)

print(('Done, elapsed time: %s' % str(datetime.now() - start)))
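For intuition, a tiny self-contained illustration of the AdaGrad-style step used in update() above; the toy indices and target are made up:

# illustration: the per-feature learning rate alpha/sqrt(n[i]) shrinks as
# absolute gradients accumulate, so early steps are large and later ones small
from math import exp, sqrt

w, n, alpha = [0.0] * 4, [0.0] * 4, 0.1
x = [0, 2]   # active feature indices (their values are implicitly 1)
y = 1.0      # target

for _ in range(3):
    wTx = sum(w[i] for i in x)
    p = 1.0 / (1.0 + exp(-wTx))          # sigmoid prediction
    for i in x:
        n[i] += abs(p - y)               # accumulated |gradient| per feature
        w[i] -= (p - y) * alpha / sqrt(n[i])
    print(round(p, 4))                   # prediction climbs toward 1.0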
7da0cdbd0ae336d14f7023b24a2e9169e58abf11
94fd1381adcfaa5ea64dc13123aef16697b0396a
/covid_dashboard/views/get_districts_daily_report_day_wise/request_response_mocks.py
540280aed7e3719046e9b95366e4f81bc83ed4df
[]
no_license
bharathi151/covid_dashboard
30ac9fe4720b8cd42028b33dcc1b620e0f1ebdb1
930bf3e46e8d7c56c682ce10f7f6e5fa7f50cab8
refs/heads/master
2022-11-14T20:18:24.648922
2020-06-27T10:08:53
2020-06-27T10:08:53
269,612,744
0
0
null
null
null
null
UTF-8
Python
false
false
336
py
RESPONSE_200_JSON = """ [ { "district_name": "string", "district_id": 1, "day_wise_statistics": [ { "total_confirmed_cases": 1, "total_deaths": 1, "total_recovered_cases": 1, "date": "string" } ] } ] """
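Since the mock above is plain JSON, a test can load it directly; a minimal sketch, assuming the constant above is in scope:

import json

mock = json.loads(RESPONSE_200_JSON)
print(mock[0]["district_name"], mock[0]["day_wise_statistics"][0]["total_deaths"])  # string 1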
b90fbfd3c2d421fb70c9156499e70a3a7511340d
4af090efabd08ef73c411a00ce4972a1c6f30a22
/python_100days/7day/practice11.py
82eb730e0a554302387bf8dc26b7ee42b67aaddd
[]
no_license
predatory123/byhytest
e52bca664f9461c9309aaa9bf779c02368ed937c
578206c9ec9253d0d9325e72cdc13dde6eeb2fc1
refs/heads/master
2023-04-26T13:33:14.462408
2021-05-20T13:33:37
2021-05-20T14:26:22
369,213,148
2
0
null
null
null
null
UTF-8
Python
false
false
1,007
py
# Comprehensive case 2: the Josephus ring problem
"""
"The Lucky Christians"
15 Christians and 15 non-Christians run into trouble at sea, and to let some of them
survive, 15 of the 30 must be thrown overboard. Someone proposes that everyone stand
in a circle and count off starting from 1; whoever counts 9 is thrown into the sea,
the person after them restarts the count from 1, and this repeats until 15 people
have been thrown overboard. By God's blessing, all 15 Christians survive. The question
is how the people were standing at the start: which positions were Christians and
which were non-Christians.
"""


def main():
    persons = [True] * 30
    counter, index, number = 0, 0, 0
    while counter < 15:
        if persons[index]:
            number += 1
            if number == 9:
                persons[index] = False
                counter += 1
                number = 0
        index += 1
        index %= 30
    for person in persons:
        # 基 = Christian (still standing), 非 = non-Christian (thrown overboard)
        print('基' if person else '非', end='')


if __name__ == '__main__':
    main()
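The loop above hard-codes 30 people, a count of 9, and 15 removals; a sketch of the same simulation with the parameters pulled out (the helper name and signature are hypothetical, not part of the original exercise):

# hypothetical generalization: n people in a circle, every k-th removed, m removals
def josephus_survivors(n, k, m):
    alive = [True] * n
    removed, index, count = 0, 0, 0
    while removed < m:
        if alive[index]:
            count += 1
            if count == k:
                alive[index] = False
                removed += 1
                count = 0
        index = (index + 1) % n
    return alive

print(''.join('C' if p else 'N' for p in josephus_survivors(30, 9, 15)))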
3fce41e05b897b1b5f9cb8483bc9db41b2f751a0
3c8701e04900389adb40a46daedb5205d479016c
/oldboy-python18/day02-列表-字典/home-work-stu/购物车.py
63b937b4063f23e586269f417564b2537968ebdd
[]
no_license
huboa/xuexi
681300653b834eaf506f49987dcca83df48e8db7
91287721f188b5e24fbb4ccd63b60a80ed7b9426
refs/heads/master
2020-07-29T16:39:12.770272
2018-09-02T05:39:45
2018-09-02T05:39:45
73,660,825
1
0
null
null
null
null
UTF-8
Python
false
false
2,584
py
# coding: utf-8
goods = [
    {"name": "computer", "price": 1999},
    {"name": "mouse", "price": 10},
    {"name": "yacht", "price": 20},
    {"name": "beauty", "price": 998},
]
shopping_cart = []

while True:
    # get the total assets
    total_assets = input('Please enter your total assets: ').strip()
    if len(total_assets) == 0:
        continue
    else:
        if total_assets.isdigit():
            total_assets = int(total_assets)
            print('Your total assets: %d' % total_assets)
            break
        else:
            print('Your input is not valid:')
            continue

while True:
    # show the product list
    n = 1
    print('-----------Products-----------')
    for good in goods:
        good['id'] = n
        print('Product id: %d , name: %s , price: %d' % (n, good['name'], good['price']))
        n += 1
    print('-----------------------------')

    while True:
        choice = input('Please choose a product: ').strip()
        if len(choice) == 0:
            continue
        else:
            if choice.isdigit():
                n = 0
                for good in goods:
                    if int(choice) == good['id']:
                        # add to the shopping cart
                        shopping_cart.append((good['name'], good['price']))
                        n = 1
                if n == 0:
                    print('The product you chose does not exist:')
                else:
                    # show the shopping cart
                    print('-----------Shopping cart-----------')
                    if len(shopping_cart) == 0:
                        print('The shopping cart is empty')
                    else:
                        for value in shopping_cart:
                            print('Product name: %s , price: %d' % (value[0], value[1]))
                    print('-----------------------------')
                    break

    # checkout
    while True:
        is_buy = input('Enter y to check out, or any other key to keep shopping: ').strip()
        if len(is_buy) != 0 and is_buy == 'y':
            total_price = 0
            for i in shopping_cart:
                total_price += i[1]
            print('The total price of your purchase is: %d' % total_price)
            if total_price > total_assets:
                print('Insufficient balance. Your balance is %d' % total_assets)
                break
            else:
                total_assets = total_assets - total_price
                print('Purchase successful, balance is %d' % total_assets)
                shopping_cart.clear()
                break
        else:
            break
2372a02f129a67fbf7970e593aecdaeb2bdb38b5
55647a80c8b412af9df0ba3f50595cc2f29c25e6
/res/scripts/client/messenger/doc_loaders/colors_schemes.py
5d932c37ceee7ccf7724d9394a83e08eff0f0204
[]
no_license
cnsuhao/WOT-0.9.17-CT
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
d1f932d8cabaf8aa21708622e87f83c8d24d6451
refs/heads/master
2021-06-08T18:11:07.039293
2016-11-19T19:12:37
2016-11-19T19:12:37
null
0
0
null
null
null
null
WINDOWS-1250
Python
false
false
2,484
py
# 2016.11.19 19:53:40 Střední Evropa (běžný čas) # Embedded file name: scripts/client/messenger/doc_loaders/colors_schemes.py from messenger.doc_loaders import _xml_helpers def _readColors(xmlCtx, section, colorsNames, defName): result = {} notFound = colorsNames[:] for tagName, subSec in section.items(): if tagName != 'color': raise _xml_helpers.XMLError(xmlCtx, 'Tag "{0:>s}" is invalid'.format(tagName)) ctx = xmlCtx.next(subSec) name = _xml_helpers.readNoEmptyStr(ctx, subSec, 'name', 'Section "name" is not defined') if name not in colorsNames: raise _xml_helpers.XMLError(ctx, 'Name of color {0:>s} is invalid'.format(name)) result[name] = _xml_helpers.readRGB(ctx, subSec, 'rgb', 'Color is invalid.') notFound.remove(name) if len(notFound): defColor = 0 if defName in result: defColor = result[defName] for name in notFound: result[name] = defColor return result def _readColorScheme(xmlCtx, section, colorScheme): names = colorScheme.getColorsNames() defName = colorScheme.getDefColorName() for tagName, subSec in section.items(): if tagName == 'name': continue if tagName != 'item': raise _xml_helpers.XMLError(xmlCtx, 'Tag "{0:>s}" is invalid'.format(tagName)) ctx = xmlCtx.next(subSec) name = _xml_helpers.readNoEmptyStr(ctx, subSec, 'name', 'Section "name" is not defined') colorsSec = subSec['colors'] if not colorsSec: raise _xml_helpers.XMLError(ctx, 'Section "colors" is not defined') colorScheme[name] = _readColors(ctx.next(colorsSec), colorsSec, names, defName) def load(xmlCtx, section, messengerSettings): for tagName, subSec in section.items(): if tagName != 'colorScheme': raise _xml_helpers.XMLError(xmlCtx, 'Tag {0:>s} is invalid'.format(tagName)) ctx = xmlCtx.next(subSec) name = _xml_helpers.readNoEmptyStr(ctx, subSec, 'name', 'Color scheme name is not defined') colorScheme = messengerSettings.getColorScheme(name) if colorScheme is not None: _readColorScheme(ctx, subSec, colorScheme) return # okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\doc_loaders\colors_schemes.pyc # decompiled 1 files: 1 okay, 0 failed, 0 verify failed # 2016.11.19 19:53:40 Střední Evropa (běžný čas)
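For reference, the XML shape this loader expects can be reconstructed from the tag names it reads ('colorScheme' with a 'name', then 'item' entries holding a 'colors' section of 'color' entries with 'name' and 'rgb'); the fragment below is an illustrative guess, not taken from game data:

# hypothetical sketch of an accepted input, reconstructed from the reader above
EXAMPLE_XML = """
<colorScheme>
    <name>contacts</name>
    <item>
        <name>online</name>
        <colors>
            <color> <name>default</name> <rgb>96 255 0</rgb> </color>
        </colors>
    </item>
</colorScheme>
"""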
6aafd67487c0bd93b6877eceb974ad1a5b907767
ec7ecc5abbdd03fb55f24e89dbbdfa23ebd7b60f
/evaluate postfix expression.py
0287083b4698fdbb7abd669aeabc7e66044a9f3e
[]
no_license
poojithayadavalli/codekata
cd290e009cf3e2f504c99dd4f6de9171f217c6be
1885c45a277cf1023e483bd77edf0c6edf8d95f3
refs/heads/master
2020-07-18T14:06:17.190229
2020-05-30T09:00:29
2020-05-30T09:00:29
206,259,715
0
0
null
null
null
null
UTF-8
Python
false
false
1,541
py
class Evaluate:

    # Constructor to initialize the class variables
    def __init__(self, capacity):
        self.top = -1
        self.capacity = capacity
        # This array is used as a stack
        self.array = []

    # check if the stack is empty
    def isEmpty(self):
        return True if self.top == -1 else False

    # Return the value of the top of the stack
    def peek(self):
        return self.array[-1]

    # Pop the element from the stack
    def pop(self):
        if not self.isEmpty():
            self.top -= 1
            return self.array.pop()
        else:
            return "$"

    # Push the element to the stack
    def push(self, op):
        self.top += 1
        self.array.append(op)

    # The main function that evaluates a given postfix expression
    # (single-digit operands only, since it scans character by character)
    def evaluatePostfix(self, exp):
        # Iterate over the expression for evaluation
        for i in exp:
            # If the scanned character is an operand
            # (number here) push it to the stack
            if i.isdigit():
                self.push(i)
            # If the scanned character is an operator,
            # pop two elements from stack and apply it.
            else:
                val1 = self.pop()
                val2 = self.pop()
                self.push(str(eval(val2 + i + val1)))
        return int(self.pop())


exp = input()
obj = Evaluate(len(exp))
print(obj.evaluatePostfix(exp))
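A quick check of the evaluator above without stdin, using the classic postfix example:

# "231*+9-" means 2 + (3 * 1) - 9, which is -4
obj = Evaluate(len("231*+9-"))
print(obj.evaluatePostfix("231*+9-"))  # -4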
4be0a9347751505cc966aaaae4aa8a00df3626f7
f13acd0d707ea9ab0d2f2f010717b35adcee142f
/AtCoder_Virtual_Contest/macle_20220825/c/main.py
02c948ca2212d942ef5f1445c169292d56933fb5
[ "CC0-1.0", "LicenseRef-scancode-public-domain" ]
permissive
KATO-Hiro/AtCoder
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
bf43320bc1af606bfbd23c610b3432cddd1806b9
refs/heads/master
2023-08-18T20:06:42.876863
2023-08-17T23:45:21
2023-08-17T23:45:21
121,067,516
4
0
CC0-1.0
2023-09-14T21:59:38
2018-02-11T00:32:45
Python
UTF-8
Python
false
false
5,236
py
# -*- coding: utf-8 -*- import math from bisect import bisect_left, bisect_right, insort from typing import Generic, Iterable, Iterator, TypeVar, Union, List T = TypeVar('T') class SortedMultiset(Generic[T]): """Sorted multi set (set) in C++. See: https://qiita.com/tatyam/items/492c70ac4c955c055602 https://github.com/tatyam-prime/SortedSet/blob/main/SortedMultiset.py """ BUCKET_RATIO = 50 REBUILD_RATIO = 170 def _build(self, a=None) -> None: "Evenly divide `a` into buckets." if a is None: a = list(self) size = self.size = len(a) bucket_size = int(math.ceil(math.sqrt(size / self.BUCKET_RATIO))) self.a = [a[size * i // bucket_size: size * (i + 1) // bucket_size] for i in range(bucket_size)] def __init__(self, a: Iterable[T] = []) -> None: "Make a new SortedMultiset from iterable. / O(N) if sorted / O(N log N)" a = list(a) if not all(a[i] <= a[i + 1] for i in range(len(a) - 1)): # type: ignore a = sorted(a) # type: ignore self._build(a) def __iter__(self) -> Iterator[T]: for i in self.a: for j in i: yield j # type: ignore def __reversed__(self) -> Iterator[T]: for i in reversed(self.a): for j in reversed(i): yield j def __len__(self) -> int: return self.size def __repr__(self) -> str: return "SortedMultiset" + str(self.a) def __str__(self) -> str: s = str(list(self)) return "{" + s[1: len(s) - 1] + "}" def _find_bucket(self, x: T) -> List[T]: "Find the bucket which should contain x. self must not be empty." for a in self.a: if x <= a[-1]: # type: ignore return a return a # type: ignore def __contains__(self, x: T) -> bool: if self.size == 0: return False a = self._find_bucket(x) i = bisect_left(a, x) # type: ignore return i != len(a) and a[i] == x def count(self, x: T) -> int: "Count the number of x." return self.index_right(x) - self.index(x) def add(self, x: T) -> None: "Add an element. / O(√N)" if self.size == 0: self.a = [[x]] self.size = 1 return a = self._find_bucket(x) insort(a, x) # type: ignore self.size += 1 if len(a) > len(self.a) * self.REBUILD_RATIO: self._build() def discard(self, x: T) -> bool: "Remove an element and return True if removed. / O(√N)" if self.size == 0: return False a = self._find_bucket(x) i = bisect_left(a, x) # type: ignore if i == len(a) or a[i] != x: return False a.pop(i) self.size -= 1 if len(a) == 0: self._build() return True def lt(self, x: T) -> Union[T, None]: "Find the largest element < x, or None if it doesn't exist." for a in reversed(self.a): if a[0] < x: # type: ignore return a[bisect_left(a, x) - 1] # type: ignore return None def le(self, x: T) -> Union[T, None]: "Find the largest element <= x, or None if it doesn't exist." for a in reversed(self.a): if a[0] <= x: # type: ignore return a[bisect_right(a, x) - 1] # type: ignore return None def gt(self, x: T) -> Union[T, None]: "Find the smallest element > x, or None if it doesn't exist." for a in self.a: if a[-1] > x: # type: ignore return a[bisect_right(a, x)] # type: ignore return None def ge(self, x: T) -> Union[T, None]: "Find the smallest element >= x, or None if it doesn't exist." for a in self.a: if a[-1] >= x: # type: ignore return a[bisect_left(a, x)] # type: ignore return None def __getitem__(self, x: int) -> T: "Return the x-th element, or IndexError if it doesn't exist." if x < 0: x += self.size if x < 0: raise IndexError for a in self.a: if x < len(a): return a[x] # type: ignore x -= len(a) raise IndexError def index(self, x: T) -> int: "Count the number of elements < x." 
ans = 0 for a in self.a: if a[-1] >= x: # type: ignore return ans + bisect_left(a, x) # type: ignore ans += len(a) return ans def index_right(self, x: T) -> int: "Count the number of elements <= x." ans = 0 for a in self.a: if a[-1] > x: # type: ignore return ans + bisect_right(a, x) # type: ignore ans += len(a) return ans def main(): import sys input = sys.stdin.readline l, q = map(int, input().split()) s = SortedMultiset([0, l]) for i in range(q): ci, xi = map(int, input().split()) if ci == 1: s.add(xi) else: print(s.gt(xi) - s.lt(xi)) if __name__ == "__main__": main()
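A short usage sketch of the SortedMultiset above, exercising the operations the contest solution relies on:

# usage sketch for SortedMultiset
s = SortedMultiset([3, 1, 4, 1, 5])
s.add(2)
print(s)                  # {1, 1, 2, 3, 4, 5}
print(s.count(1))         # 2
print(s.lt(4), s.gt(4))   # 3 5
print(s.index(4))         # 4 elements are < 4
s.discard(1)
print(len(s))             # 5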
f3e2452d08102097b71299f1835a5000ecc6f07d
e4f8b14cead542586a96bcaa75993b0a29b3c3d0
/pyNastran/f06/test/test_f06.py
1bd6ea7db2cd64bd4ae4a058a7e38f9e763c9e81
[]
no_license
afcarl/cyNastran
f1d1ef5f1f7cb05f435eac53b05ff6a0cc95c19b
356ee55dd08fdc9880c5ffba47265125cba855c4
refs/heads/master
2020-03-26T02:09:00.350237
2014-08-07T00:00:29
2014-08-07T00:00:29
144,398,645
1
0
null
2018-08-11T15:56:50
2018-08-11T15:56:50
null
UTF-8
Python
false
false
5,968
py
import os import sys import time from traceback import print_exc import pyNastran from pyNastran.f06.f06 import F06 #from pyNastran.op2.test.test_op2 import parseTableNamesFromF06, getFailedFiles def run_lots_of_files(files, debug=True, saveCases=True, skipFiles=[], stopOnFailure=False, nStart=0, nStop=1000000000): n = '' iSubcases = [] failedCases = [] nFailed = 0 nTotal = 0 nPassed = 0 t0 = time.time() for i, f06file in enumerate(files[nStart:nStop], nStart): # 149 baseName = os.path.basename(f06file) #if baseName not in skipFiles and not baseName.startswith('acms') and i not in nSkip: if baseName not in skipFiles: print("%" * 80) print('file=%s\n' % f06file) n = '%s ' % (i) sys.stderr.write('%sfile=%s\n' % (n, f06file)) nTotal += 1 isPassed = run_f06(f06file, iSubcases=iSubcases, debug=debug, stopOnFailure=stopOnFailure) # True/False if not isPassed: sys.stderr.write('**file=%s\n' % (f06file)) failedCases.append(f06file) nFailed += 1 else: nPassed += 1 #sys.exit('end of test...test_f06.py') if saveCases: f = open('failedCases.in', 'wb') for f06file in failedCases: f.write('%s\n' % (f06file)) f.close() print("dt = %s seconds" % (time.time() - t0)) #f06 = F06('test_tet10_subcase_1.f06') #f06.readF06() sys.exit('-----done with all models %s/%s=%.2f%% nFailed=%s-----' % (nPassed, nTotal, 100. * nPassed / float(nTotal), nTotal - nPassed)) def run_f06(f06_filename, iSubcases=[], write_f06=True, debug=False, stopOnFailure=True): isPassed = False #stopOnFailure = False #debug = True try: f06 = F06(debug=debug) #f06.set_subcases(iSubcases) # TODO not supported #f06.readBDF(f06.bdf_filename,includeDir=None,xref=False) f06.read_f06(f06_filename) #tableNamesF06 = parseTableNamesFromF06(f06.f06FileName) #tableNamesF06 = f06.getTableNamesFromF06() assert write_f06 == True, write_f06 if write_f06: (model, ext) = os.path.splitext(f06_filename) f06.write_f06(model + '.test_f06.f06') #print "subcases = ",f06.subcases #assert tableNamesF06==tableNamesF06,'tableNamesF06=%s tableNamesF06=%s' %(tableNamesF06,tableNamesF06) #f06.caseControlDeck.sol = f06.sol #print f06.caseControlDeck.getF06Data() #print f06.print_results() #print f06.caseControlDeck.getF06Data() isPassed = True except KeyboardInterrupt: sys.stdout.flush() print_exc(file=sys.stdout) sys.stderr.write('**file=%r\n' % f06file) sys.exit('keyboard stop...') #except AddNewElementError: # raise #except IOError: # missing file #pass #except AssertionError: # isPassed = True #except InvalidFormatCodeError: # isPassed = True #except RuntimeError: #InvalidAnalysisCode # isPassed = True #except SyntaxError: #Invalid Markers # isPassed = True except SystemExit: #print_exc(file=sys.stdout) #sys.exit('stopping on sys.exit') raise #except NameError: # variable isnt defined # if stopOnFailure: # raise # else: # isPassed = True #except AttributeError: # missing function # if stopOnFailure: # raise # else: # isPassed = True #except KeyError: # raise #except TypeError: # numpy error # isPassed = True #except IndexError: # bad bdf # isPassed = True #except IOError: # missing bdf file #isPassed = False #raise #except SyntaxError: #Invalid Subcase # isPassed = True #except SyntaxError: # Param Parse: # isPassed = True #except NotImplementedError: #isPassed = True #except InvalidFieldError: # bad bdf field # isPassed = True except: #print e print_exc(file=sys.stdout) if stopOnFailure: raise else: isPassed = False print "isPassed =", isPassed return isPassed def main(): from docopt import docopt msg = 'Tests to see if an F06 will work with pyNastran.\n' msg += 
'Usage:\n' msg += ' f06.py [-f] [-p] [-q] F06_FILENAME' msg += ' f06.py -h | --help\n' msg += ' f06.py -v | --version\n' msg += '\n' msg += 'Positional Arguments:\n' msg += ' F06_FILENAME path to F06 file\n' msg += '\n' msg += 'Options:\n' msg += ' -q, --quiet prints debug messages (default=False)\n' msg += ' -f, --write_f06 writes the f06 to fem.f06.out (default=True)\n' msg += ' -h, --help show this help message and exit\n' msg += " -v, --version show program's version number and exit\n" # disabled b/c the F06 doesn't support complex well #msg += ' -z, --is_mag_phase F06 Writer writes Magnitude/Phase instead of\n' #msg += ' Real/Imaginary (still stores Real/Imag)\n' if len(sys.argv) == 1: sys.exit(msg) ver = str(pyNastran.__version__) data = docopt(msg, version=ver) for key, value in sorted(data.iteritems()): print("%-12s = %r" % (key.strip('--'), value)) if os.path.exists('skippedCards.out'): os.remove('skippedCards.out') run_f06(data['F06_FILENAME'], write_f06 = data['--write_f06'], debug = not(data['--quiet']), stopOnFailure = True ) if __name__ == '__main__': # f06 main()
[ "mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b" ]
mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b
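Per the docopt usage string in main() above, a typical invocation of the pyNastran test harness looks like this; the .f06 path is a placeholder:

# command-line sketch (placeholder filename):
#     python test_f06.py -f model.f06
# or equivalently, calling the module function directly:
run_f06('model.f06', write_f06=True, debug=True, stopOnFailure=True)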
6142e7a74039e267ec08477e21952b9991b89888
4ee5affb8b16ff7d26df9b19ffee8d675df11e4e
/nested-loops/train_the_trainers.py
ce76aebb5569e2ac15837eb95cccaa5edc35603a
[]
no_license
ayk-dev/python-basics
f60849f6502d64445105a0d27272d9910ea1d509
af6d04f9001d9a45e8474f9bd4fa2b3ebe380c97
refs/heads/main
2023-01-12T11:56:12.210880
2020-11-17T20:06:40
2020-11-17T20:06:40
311,747,624
0
0
null
null
null
null
UTF-8
Python
false
false
594
py
n = int(input())  # number of people in the jury
presentation_counter = 0

presentation = input()

all_presentations_grades = 0
while presentation != 'Finish':
    total = 0
    for pres in range(1, n + 1):
        grade = float(input())
        total += grade
    average_grade = total / n
    all_presentations_grades += average_grade
    print(f'{presentation} - {average_grade:.2f}.')
    presentation = input()
    presentation_counter += 1

final_average = all_presentations_grades / presentation_counter
print(f"Student's final assessment is {final_average:.2f}.")
c994ba0a911d0bf5726934a74e94cc5b6ea8197c
da878a03674024f290775b2c10d745edf091a4dc
/Global Fires/venv/Scripts/pip3-script.py
d05b0aa101ecf28df5c3555bf979ec367071f105
[ "MIT" ]
permissive
EnriqueGambra/Global-Fires
1b3aa5670dbb69804c733b865c7906f6e9698995
652606ccd573e7bfd7a232876f0b59fcefc15f9b
refs/heads/master
2020-08-03T00:44:38.156931
2019-09-28T23:30:43
2019-09-28T23:30:43
211,568,271
0
0
null
null
null
null
UTF-8
Python
false
false
430
py
#!"C:\Users\Owner\github-Repos\Global-Fires\Global Fires\venv\Scripts\python.exe" # EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3' __requires__ = 'pip==10.0.1' import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')() )
ff22176a2b050a193f1882462e0d36e591e42784
cb0e7d6493b23e870aa625eb362384a10f5ee657
/solutions/python3/0567.py
65478b7cc2fb087117f7698fe743cdccb13f091a
[]
no_license
sweetpand/LeetCode-1
0acfa603af254a3350d457803449a91322f2d1a7
65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94
refs/heads/master
2022-11-14T07:01:42.502172
2020-07-12T12:25:56
2020-07-12T12:25:56
279,088,171
1
0
null
2020-07-12T15:03:20
2020-07-12T15:03:19
null
UTF-8
Python
false
false
500
py
class Solution: def checkInclusion(self, s1: str, s2: str) -> bool: count1 = collections.Counter(s1) required = len(s1) for r, c in enumerate(s2): count1[c] -= 1 if count1[c] >= 0: required -= 1 if r >= len(s1): count1[s2[r - len(s1)]] += 1 if count1[s2[r - len(s1)]] > 0: required += 1 if required == 0: return True return False
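A quick check of the sliding-window solution above on the LeetCode examples; Counter comes from collections, which the LeetCode runtime pre-imports, so a standalone run needs the import:

import collections

s = Solution()
print(s.checkInclusion("ab", "eidbaooo"))  # True: "ba" is a window of s2
print(s.checkInclusion("ab", "eidboaoo"))  # False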
52564c55ce188af128e41cc3810567e62b0cb71c
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/otherforms/_wisecracked.py
df762aa40f8d90ebf7ab0b38869d1bab6c31eb7e
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
236
py
# class header
class _WISECRACKED():

    def __init__(self,):
        self.name = "WISECRACKED"
        # the original assigned the bare, undefined name `wisecrack`;
        # quoting it keeps the module importable
        self.definitions = 'wisecrack'

        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['wisecrack']
28d7853629e519d31e6615eabe002706b6b08b38
4cb2bbd929ba3722d78cd6bd9feb2c5c0dd57025
/olympic/forms.py
d27d0f59c4a933f98e12550b23203e2966edaad2
[ "BSD-2-Clause" ]
permissive
mjtamlyn/tamlynscore
ebeebdc73feeab86995a2cb888e1bea203854553
c6ac4e9a5e37dc3778b1f754b3143e44fa8dc0bc
refs/heads/master
2023-08-24T11:00:16.153489
2023-08-08T11:30:24
2023-08-08T11:30:24
17,013,657
7
2
BSD-3-Clause
2023-08-05T19:52:51
2014-02-20T08:28:08
Python
UTF-8
Python
false
false
3,102
py
from django import forms from .models import Result, SessionRound class ResultForm(forms.ModelForm): class Meta: model = Result exclude = ('match', 'seed') class SetupForm(forms.Form): SPREAD_CHOICES = ( ('', 'No special options'), ('expanded', 'One target per archer'), ) MATCH_CHOICES = ( ('', 'All matches'), ('half', 'Only allocate half of the matches'), ('quarter', 'Only allocate 1/4 of the matches'), ('eighth', 'Only allocate 1/8 of the matches'), ('three-quarter', 'Only allocate 3/4 of the matches'), ('first-half', 'Only allocate first half of the matches / Final only'), ('second-half', 'Only allocate second half of the matches / Bronze only'), ) LEVEL_CHOICES = ( (1, 'Finals'), (2, 'Semis'), (3, 'Quarters'), (4, '1/8'), (5, '1/16'), (6, '1/32'), (7, '1/64'), (8, '1/128'), ) TIMING_CHOICES = ( (1, 'Pass A'), (2, 'Pass B'), (3, 'Pass C'), (4, 'Pass D'), (5, 'Pass E'), (6, 'Pass F'), (7, 'Pass G'), (8, 'Pass H'), (9, 'Pass I'), (10, 'Pass J'), ) session_round = forms.ModelChoiceField(SessionRound.objects) start = forms.IntegerField(label='Start target') level = forms.TypedChoiceField(coerce=int, choices=LEVEL_CHOICES) timing = forms.TypedChoiceField(label='Pass', coerce=int, choices=TIMING_CHOICES) spread = forms.ChoiceField(label='Target spread', choices=SPREAD_CHOICES, required=False) matches = forms.ChoiceField(label='Matches', choices=MATCH_CHOICES, required=False) delete = forms.BooleanField(required=False) def __init__(self, session_rounds, **kwargs): self.session_rounds = session_rounds super(SetupForm, self).__init__(**kwargs) self.fields['session_round'].queryset = session_rounds def save(self): sr = self.cleaned_data['session_round'] kwargs = { 'level': self.cleaned_data['level'], 'start': self.cleaned_data['start'], 'timing': self.cleaned_data['timing'], } if sr.shot_round.team_type: kwargs['expanded'] = True if self.cleaned_data['spread'] == 'expanded': kwargs['expanded'] = True if self.cleaned_data['matches'] == 'half': kwargs['half_only'] = True if self.cleaned_data['matches'] == 'quarter': kwargs['quarter_only'] = True if self.cleaned_data['matches'] == 'eighth': kwargs['eighth_only'] = True if self.cleaned_data['matches'] == 'three-quarter': kwargs['three_quarters'] = True if self.cleaned_data['matches'] == 'first-half': kwargs['first_half_only'] = True if self.cleaned_data['matches'] == 'second-half': kwargs['second_half_only'] = True if self.cleaned_data['delete']: sr.remove_matches(self.cleaned_data['level']) else: sr.make_matches(**kwargs)
641aacc8b6854764e829d6932d4d0627ea980786
19d03d646fcee318cca8078af27636732290d77b
/parlai/utils/flake8.py
1170b4bbb4a717b201637e00678bf96a87614026
[ "MIT" ]
permissive
yongkyung-oh/CMU-Studio-Project
2d6fe6ef6fa30fda1a4f2d1fc45c5b85d6143775
448492f342e8157df2e736aa52825b66b1d66fd7
refs/heads/master
2022-10-24T16:56:46.763865
2020-07-01T10:03:00
2020-07-01T10:03:00
252,878,283
2
5
MIT
2021-03-25T23:50:27
2020-04-04T01:02:44
Python
UTF-8
Python
false
false
3,424
py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Custom flake8 rules for ParlAI. Includes: - Checks for python3 shebang - Check for copyright message - Docformatter issues (TODO) """ import docformatter import difflib PYTHON_SHEBANG = '#!/usr/bin/env python3' WHITELIST_PHRASES = ['Moscow Institute of Physics and Technology.'] WHITELIST_FNS = ["mlb_vqa"] COPYRIGHT = [ "Copyright (c) Facebook, Inc. and its affiliates.", "This source code is licensed under the MIT license found in the", "LICENSE file in the root directory of this source tree.", ] class ParlAIChecker: """ Custom flake8 checker for some special ParlAI requirements. """ name = 'flake8-parlai' version = '0.1' def __init__(self, tree=None, filename=None, lines=None): self.filename = filename self.lines = lines def run(self): if self.lines is None: with open(self.filename) as f: self.lines = f.readlines() if self.lines and PYTHON_SHEBANG not in self.lines[0]: yield ( 1, 0, 'PAI100 Missing python3 shebang. (`#!/usr/bin/env python3`)', '', ) # check doc formatting source = "".join(self.lines) formatted_source = docformatter.format_code( source, pre_summary_newline=True, description_wrap_length=88, summary_wrap_length=88, make_summary_multi_line=True, force_wrap=False, ) if source != formatted_source: diff = difflib.unified_diff( source.split('\n'), # have to strip newlines formatted_source.split('\n'), f'before/{self.filename}', f'after/{self.filename}', n=0, lineterm='', ) for line in diff: if line.startswith('@@'): fields = line.split() # find out the beginning line of the docstring reformat. Example: # --- /path/to/original timestamp # +++ /path/to/new timestamp # @@ -1,3 +1,9 @@ # that -1 says the first line changed, and 3 lines were removed # with a new offset belonging at the first line, and 9 # inserted lines. line_no, *_ = fields[1].split(',') line_no = -int(line_no) yield ( line_no, 1, f'PAI101 autoformat.sh would reformat the docstring', '', ) # the rest is checking copyright, but there are some exceptions # copyright must appear in the first 16 lines of the file. source = "".join(self.lines[:16]) if any(wl in source for wl in WHITELIST_PHRASES): return for i, msg in enumerate(COPYRIGHT, 1): if any(wl in self.filename for wl in WHITELIST_FNS) and i < 3: continue if source and msg not in source: yield (i, 0, f'PAI20{i} Missing copyright `{msg}`', '')
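The checker can also be driven by hand outside flake8; a sketch, assuming docformatter is installed and 'example.py' is a placeholder path to any file to lint:

# manual run sketch; flake8 normally instantiates the plugin itself
checker = ParlAIChecker(filename='example.py')   # placeholder path
for line, col, message, _ in checker.run():
    print(f'{line}:{col} {message}')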
57e19bf0eacc2c9dc6bfd1452ebf6c427e698494
311ce6fbe1b264f2b656ba235371e756695dca53
/forcing/dot_in/aestus1_A1_ae1/make_dot_in.py
3e296f6d56c6863053a5285cd0f5d84cb28cdf8f
[ "MIT" ]
permissive
parkermac/LiveOcean
94bc9cb9fba1bdc2e206488e0e2afadfafeabb34
4bd2776cf95780a7965a18addac3c5e395703ce5
refs/heads/master
2022-11-30T10:21:50.568014
2022-11-21T16:32:55
2022-11-21T16:32:55
35,834,637
7
2
null
null
null
null
UTF-8
Python
false
false
4,180
py
""" This creates and poulates directories for ROMS runs on gaggle. It is designed to work with the "BLANK" version of the .in file, replacing things like $whatever$ with meaningful values. """ import os import sys fpth = os.path.abspath('../../') if fpth not in sys.path: sys.path.append(fpth) import forcing_functions as ffun Ldir, Lfun = ffun.intro() from datetime import datetime, timedelta fdt = datetime.strptime(Ldir['date_string'], '%Y.%m.%d') fdt_yesterday = fdt - timedelta(1) print('- dot_in.py creating files for LiveOcean for ' + Ldir['date_string']) #### USER DEFINED VALUES #### gtag = Ldir['gtag'] gtagex = gtag + '_' + Ldir['ex_name'] EX_NAME = Ldir['ex_name'].upper() multi_core = True # use more than one core if Ldir['run_type'] == 'backfill': days_to_run = 1.0 else: days_to_run = 1.0 dtsec = 30 # time step in seconds INTEGER (should fit evenly into 3600 sec) restart_nrrec = '-1' # '-1' for a non-crash restart file, otherwise '1' or '2' his_interval = 3600 # seconds to define and write to history files rst_interval = 1 # days between writing to the restart file (e.g. 5) zqt_height = '2.0d0' zw_height = '10.0d0' #### END USER DEFINED VALUES #### # DERIVED VALUES if multi_core: ntilei = '12' # number of tiles in I-direction (6) ntilej = '6' # number of tiles in J-direction (12) else: ntilei = '1' ntilej = '1' if float(3600/dtsec) != 3600.0/dtsec: print('** WARNING: dtsec does not fit evenly into 1 hour **') dt = str(dtsec) + '.0d0' # a string version of dtsec, for the .in file ninfo = int(his_interval/dtsec) # how often to write info to the log file (# of time steps) nhis = int(his_interval/dtsec) # how often to write to the history files ndefhis = int(nhis) # how often to create new history files nrst = int(rst_interval*86400/dtsec) ntimes = int(days_to_run*86400/dtsec) # file location stuff date_string = Ldir['date_string'] date_string_yesterday = fdt_yesterday.strftime('%Y.%m.%d') dstart = str(int(Lfun.datetime_to_modtime(fdt) / 86400.)) f_string = 'f' + date_string f_string_yesterday = 'f'+ date_string_yesterday # where forcing files live (fjord, as seen from gaggle) lo_dir = '/fjdata1/parker/LiveOcean/' loo_dir = '/fjdata1/parker/LiveOcean_output/' grid_dir = '/fjdata1/parker/LiveOcean_data/grids/' + Ldir['gridname'] + '/' force_dir = loo_dir + gtag + '/' + f_string + '/' roms_dir = '/pmr1/parker/LiveOcean_roms/' roms_name = 'ROMS_820' # the .in file dot_in_name = 'liveocean.in' # name of the .in file dot_in_dir0 = Ldir['roms'] + 'output/' + gtagex + '/' Lfun.make_dir(dot_in_dir0) # make sure it exists dot_in_dir = dot_in_dir0 + f_string +'/' Lfun.make_dir(dot_in_dir, clean=True) # make sure it exists and is empty # where to put the output files according to the .in file out_dir0 = roms_dir + 'output/' + gtagex + '/' out_dir = out_dir0 + f_string + '/' atm_dir = 'atm/' # which atm forcing files to use ocn_dir = 'ocnA/' # which ocn forcing files to use riv_dir = 'riv1/' # which riv forcing files to use tide_dir = 'tideA/' # which tide forcing files to use if Ldir['start_type'] == 'continuation': nrrec = '0' # '-1' for a hot restart ininame = 'ocean_rst.nc' # for a hot perfect restart #ininame = 'ocean_his_0025.nc' # for a hot restart ini_fullname = out_dir0 + f_string_yesterday + '/' + ininame elif Ldir['start_type'] == 'new': nrrec = '0' # '0' for a history or ini file ininame = 'ocean_ini.nc' # could be an ini or history file ini_fullname = force_dir + ocn_dir + ininame # END DERIVED VALUES ## create .in ########################## f = open('BLANK.in','r') f2 = open(dot_in_dir 
+ dot_in_name,'w') in_varlist = ['base_dir','ntilei','ntilej','ntimes','dt','nrrec','ninfo', 'nhis','dstart','ndefhis','nrst','force_dir','grid_dir','roms_dir', 'atm_dir','ocn_dir','riv_dir','tide_dir','dot_in_dir', 'zqt_height','zw_height','ini_fullname','out_dir','EX_NAME','roms_name'] for line in f: for var in in_varlist: if '$'+var+'$' in line: line2 = line.replace('$'+var+'$', str(eval(var))) line = line2 else: line2 = line f2.write(line2) f.close() f2.close()
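The $var$ substitution loop above is easy to check in isolation; a minimal sketch with made-up values:

# sketch of the template idea: each $name$ token is replaced by its value
template = "DT == $dt$\nNTIMES == $ntimes$\n"
values = {'dt': '30.0d0', 'ntimes': '2880'}   # hypothetical values
for var, val in values.items():
    template = template.replace('$' + var + '$', val)
print(template)   # DT == 30.0d0 / NTIMES == 2880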
dff2b536322cbc8ac24cd00ed962fdad5d4bbba2
592961def9fe287a31e117649f1ac1e97b085a9b
/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py
a6582e6dd5b515ec3783b7ecc3ed81adffc4f3cb
[]
no_license
Rushin95/The_Trip_Planner-Lyft_vs_Uber
62f03a1df8c6a0268089f50f4e80ec3d9b6b9870
4eeea4029eb4df047471b92065455a6828232293
refs/heads/master
2021-01-19T11:52:47.766019
2018-05-03T23:59:58
2018-05-03T23:59:58
82,268,914
1
0
null
null
null
null
UTF-8
Python
false
false
663
py
from __future__ import absolute_import, division, unicode_literals from . import base try: from collections import OrderedDict except ImportError: # noinspection PyUnresolvedReferences from ordereddict import OrderedDict class Filter(base.Filter): def __iter__(self): for token in base.Filter.__iter__(self): if token["type"] in ("StartTag", "EmptyTag"): attrs = OrderedDict() for name, value in sorted(token["data"].items(), key=lambda x: x[0]): attrs[name] = value token["data"] = attrs yield token
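For context, this filter plugs into an html5lib token stream between a tree walker and the serializer; a usage sketch, assuming html5lib is installed and the Filter above is in scope:

# sketch: sort attributes alphabetically while re-serializing a fragment
import html5lib
from html5lib.serializer import HTMLSerializer

dom = html5lib.parse('<p id="b" class="a">hi</p>')
walker = html5lib.getTreeWalker("etree")
stream = Filter(walker(dom))            # the Filter defined above
print(HTMLSerializer().render(stream))  # attributes now emitted as class, then id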
2e0a0431b921c67132029866d0dc9a2fe708b565
e0268b6e868fcaaf6fc9c42b720e014c3ae41a20
/scripts/make_bu_data.py
ee30a5f8470d550046a3ed6c5170a7e7aee29344
[ "MIT" ]
permissive
gradio-app/ImageCaptioning.pytorch
79208726dd09e1e532863af56c7a900b576cbca2
436d900d01139dc402b24425c60679409e0c9051
refs/heads/master
2022-11-15T03:27:38.775656
2020-07-12T22:44:30
2020-07-12T22:44:30
279,639,722
1
1
MIT
2020-07-14T16:37:47
2020-07-14T16:37:46
null
UTF-8
Python
false
false
1,889
py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import base64 import numpy as np import csv import sys import zlib import time import mmap import argparse parser = argparse.ArgumentParser() # output_dir parser.add_argument('--downloaded_feats', default='data/bu_data', help='downloaded feature directory') parser.add_argument('--output_dir', default='data/cocobu', help='output feature files') args = parser.parse_args() csv.field_size_limit(sys.maxsize) FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features'] infiles = ['trainval/karpathy_test_resnet101_faster_rcnn_genome.tsv', 'trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv',\ 'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0', \ 'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1'] os.makedirs(args.output_dir+'_att') os.makedirs(args.output_dir+'_fc') os.makedirs(args.output_dir+'_box') for infile in infiles: print('Reading ' + infile) with open(os.path.join(args.downloaded_feats, infile), "r+b") as tsv_in_file: reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES) for item in reader: item['image_id'] = int(item['image_id']) item['num_boxes'] = int(item['num_boxes']) for field in ['boxes', 'features']: item[field] = np.frombuffer(base64.decodestring(item[field]), dtype=np.float32).reshape((item['num_boxes'],-1)) np.savez_compressed(os.path.join(args.output_dir+'_att', str(item['image_id'])), feat=item['features']) np.save(os.path.join(args.output_dir+'_fc', str(item['image_id'])), item['features'].mean(0)) np.save(os.path.join(args.output_dir+'_box', str(item['image_id'])), item['boxes'])
ce037214f60bd6c8975b5e9da15eaaa6acd30d83
685038d4be188fa72e9dba1d2213a47ee3aa00a2
/ECOS2021/Demands/Inputs/Surveys/A/S3/Oct_S3_A.py
f3bb6b79446fe8f081e16398f9239662c9c7acc0
[]
no_license
CIE-UMSS/Tradeoff-between-Installed-Capacity-and-Unserved-Energy
e5599e4e4ac60b97f0c4c57c5de95e493b1b5ac4
459f31552e3ab57a2e52167ab82f8f48558e173c
refs/heads/master
2023-06-01T18:09:29.839747
2021-06-19T15:56:26
2021-06-19T15:56:26
343,720,452
0
0
null
null
null
null
UTF-8
Python
false
false
2,968
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 14:33:07 2020

@author: Alejandro Soto

Script for 2 classes of household in Raqaypampa.
"""
from core import User, np
User_list = []

#User classes definition
HI = User("high income",1)
User_list.append(HI)

LI = User("low income",0)
User_list.append(LI)

'''
Base scenario (BSA): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2), Water Heater (1), Mixer (1)
Base scenario (BSB): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2)

A
Scenario 1: BSA + Fridge (1) + Freezer* (1).
Scenario 2: BSA + Fridge (1).
Scenario 3: BSA + Fridge (1)*.
Scenario 4: BSA + Freezer (1).
Scenario 5: BSA + Wheler (1).
Scenario 6: BSA + Grinder (1).
Scenario 7: Add + Dryer (1),
Scenario 9: All

B
Scenario 8: BSB + Water Heater** (1).
Scenario 10: BSA + Pump Water (1).
Scenario 11: BSA + DVD (1).
Scenario 12: BSA + Blender (1).
Scenario 13: BSA + Iron (1).
Scenario 14: BSA + Mill (1).

* With seasonal variation
** Occasional use

Cold Months: May-Aug  Std Cycle 8:00-18:00          Above 10 degrees
Warm Months: Jan-Apr  Std Cycle 0:00-23:59          Above 10 degrees
Hot Months:  Sep-Dec  Std Cycle 0:00-10:00; 15:01-23:59  Above 10 degrees
                      Int Cycle 10:01-15:00
'''
#High-Income
#indoor bulb
HI_indoor_bulb = HI.Appliance(HI,3,7,1,320,0.6,190)
HI_indoor_bulb.windows([1080,1440],[0,0])
#outdoor bulb
HI_outdoor_bulb = HI.Appliance(HI,1,13,1,340,0.1,300)
HI_outdoor_bulb.windows([1100,1440],[0,0])
#radio
HI_Radio = HI.Appliance(HI,1,7,1,280,0.3,110)
HI_Radio.windows([420,708],[0,0])
#tv
HI_TV = HI.Appliance(HI,1,60,3,300,0.38,114)
HI_TV.windows([1140,1440],[651,1139],0.35,[300,650])
#phone charger
HI_Phone_charger = HI.Appliance(HI,2,5,3,250,0.4,95)
HI_Phone_charger.windows([1190,1440],[0,420],0.35,[421,1189])
#water_heater
HI_Water_heater = HI.Appliance(HI,1,150,1,60,0.05,30)
HI_Water_heater.windows([0,1440],[0,0])
#mixer
HI_Mixer = HI.Appliance(HI,1,50,1,10,0.5,5,occasional_use = 0.3)
HI_Mixer.windows([420,560],[0,0])
#fridge
HI_Fridge = HI.Appliance(HI,1,200,1,1440,0,30,'yes',3)
HI_Fridge.windows([0,1440],[0,0])
HI_Fridge.specific_cycle_1(200,20,5,10)
HI_Fridge.specific_cycle_2(200,15,5,15)
HI_Fridge.specific_cycle_3(200,10,5,20)
HI_Fridge.cycle_behaviour([570,990],[0,0],[0,480],[1170,1440],[481,569],[991,1169])

#Lower Income
#indoor bulb
LI_indoor_bulb = LI.Appliance(LI,3,7,2,287,0.4,124)
LI_indoor_bulb.windows([1153,1440],[0,300],0.5)
#outdoor bulb
LI_outdoor_bulb = LI.Appliance(LI,1,13,1,243,0.3,71)
LI_outdoor_bulb.windows([1197,1440],[0,0])
#radio
LI_Radio = LI.Appliance(LI,1,7,2,160,0.3,49)
LI_Radio.windows([480,840],[841,1200],0.5)
#TV
LI_TV = LI.Appliance(LI,1,100,3,250,0.3,74)
LI_TV.windows([1170,1420],[551,1169],0.3,[300,550])
#phone charger
LI_Phone_charger = LI.Appliance(LI,2,5,3,200,0.4,82)
LI_Phone_charger.windows([1020,1440],[0,420],0.3,[720,1019])
df585f561e1bd0f95edb526fd662fc99e5cba754
f56fda98a93cedcec33a7d9fbb330e5cf78031e1
/Leetcode/45. Jump Game II.py
b2d963b2956cda7d0acaeac20324868e1d0d0149
[]
no_license
GuanzhouSong/Leetcode_Python
7a2bac42203fb6c0b671153d9e300eb0c73d39d1
dbb9be177c5e572eb72a79508bb6e24f357d54b3
refs/heads/master
2021-09-25T04:10:09.217565
2018-10-17T22:31:41
2018-10-17T22:31:41
null
0
0
null
null
null
null
UTF-8
Python
false
false
408
py
import sys


class Solution:
    def jump(self, nums):
        # mintimes[i] = fewest jumps needed to reach index i
        mintimes = [0] + [sys.maxsize] * (len(nums) - 1)
        for i in range(0, len(nums) - 1):
            for j in range(1, min(nums[i] + 1, len(nums) - i)):
                mintimes[i + j] = min(mintimes[i + j], mintimes[i] + 1)
        return mintimes[-1]


s = Solution()
nums = [6, 2, 6, 1, 7, 9, 3, 5, 3, 7, 2, 8, 9, 4, 7, 7, 2, 2, 8, 4, 6, 6, 1, 3]
print(s.jump(nums))  # the original called s.jump2, which does not exist
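A quick check of the DP above on the classic example:

# [2,3,1,1,4]: jump index 0 -> 1 -> 4, so 2 jumps
print(Solution().jump([2, 3, 1, 1, 4]))  # 2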
382ab283e99868eb7c25aae590e703339aa079d0
f4b694982027ac362de1e9d6755f2943d0355a06
/DECSKS-12 -- debugging the recast from DECSKS-09 by comparing with v1.2/v1.2/DECSKS/lib/diagnostics.py
e4234421bf17641907309887eb6c7504590672c3
[]
no_license
dsirajud/IPython-notebooks
55275e44191c16f5393571522787993f931cfd98
6ad9d978c611558525fc9d716af101dc841a393b
refs/heads/master
2021-01-15T15:33:57.119172
2016-07-13T20:08:29
2016-07-13T20:08:29
35,054,473
0
0
null
null
null
null
UTF-8
Python
false
false
4,472
py
import DECSKS import numpy as np import sys # to retrieve smallest float for lower bound tolerance def HighPrecisionE(number): """Converts a number into a string object while retaining a chosen degree of precision. This is designed to evade the truncation that is involved with str() so that outputs can store numbers with high precision inputs: number -- (number) outputs: string object with chosen precision in scientific notation """ return "%.22e" % number def calcs_and_writeout(sim_params,f,n,x,v): """orchestrates the calculation of various quantities, e.g. Lp norms, energy, electrostatic energy, ... inputs: files -- (dict) contains output filenames to be written to f -- (ndarray, ndim=3), f(t,x,v) n -- (int) time step number, t^n x -- (instance) space variable v -- (instance) velocity variable outputs: None """ #I = "invariant", I1 = L1 norm invariant, etc. if sim_params['record_outputs'] == 'yes': I1 = L1(f,n,x,v) I2 = L2(f,n,x,v) # electrostatic terms E = DECSKS.lib.fieldsolvers.Poisson(sim_params['ni'], f, x, v, n) IW = total_energy(f,n,x,v,E) WE = electrostatic_energy(x,E) S = entropy(f,n,x,v) # write to files sim_params['outfiles']['I1'].write(HighPrecisionE(I1) + '\n') sim_params['outfiles']['I2'].write(HighPrecisionE(I2) + '\n') sim_params['outfiles']['IW'].write(HighPrecisionE(IW) + '\n') sim_params['outfiles']['WE'].write(HighPrecisionE(WE) + '\n') sim_params['outfiles']['S'].write(HighPrecisionE(S) + '\n') if n == sim_params['Nt']: close_all_outfiles(sim_params) return None def L1(f,n,x,v): """computes the L1 norm inputs: f -- (ndarray, ndim=3), f(t,x,v) n -- (int) time step number, t^n x -- (instance) space variable v -- (instance) velocity variable outputs: I1 -- (float) L1 norm """ return np.sum(f[n,:,:]) * x.width * v.width def L2(f,n,x,v): """computes the square of the L2 norm. Note, the intended purpose of this computation is to compare with its deviation from the value at time zero. To minimize compounded errors from redundant operations, a squareroot is not taken here and should be applied later if desired, e.g. np.sqrt( (L2[t] - L2[0]) / L2[0]) inputs: f -- (ndarray, ndim=3), f(t,x,v) n -- (int) time step number, t^n x -- (instance) space variable v -- (instance) velocity variable outputs: I2 -- (float) L2 norm """ # compute the square of the L2 norm below to minimize # compounded error from repeated operations like squareroot return np.sum(f[n,:,:]**2) * x.width * v.width def total_energy(f,n,x,v,E): """computes the total energy for a Vlasov-Poisson system IW = 1/2 sum_i sum_j f[n,i,j] dx dv + 1/2 sum_i E[i] dx inputs: f -- (ndarray, ndim=3), f(t,x,v) n -- (int) time step number, t^n x -- (instance) space variable v -- (instance) velocity variable E -- (ndarray, ndim=1), E(x) at t^n outputs: IW -- (float) total energy at time t^n in system """ return 1/2.*np.sum(f[n,:,:] * v.cells **2) * x.width * v.width \ + 1/2. 
* np.sum(E**2) * x.width

def electrostatic_energy(x,E):
    """computes the electrostatic energy

        WE = 1/2 sum_i E[i] dx

    inputs:
    E -- (ndarray, ndim=1) E(x) at t^n
    x -- (instance) space variable

    outputs:
    WE -- (float) electrostatic energy at time t^n
    """
    return 1/2.* np.sum(E**2)* x.width

def entropy(f,n,x,v):
    """computes the entropy S at time t^n,

        S = sum_i sum_j f_[n,i,j] * ln (f[n,i,j] + eps) dxdv

    inputs:
    f -- (ndarray, ndim=3), f(t,x,v)
    n -- (int) time step number, t^n
    x -- (instance) space variable
    v -- (instance) velocity variable

    outputs:
    S -- (float) entropy at time t^n
    """
    eps = sys.float_info.min # to evade taking np.log(0)
    return np.sum(f[n,:,:] * np.log(f[n,:,:] + eps)) * x.width * v.width

def close_all_outfiles(sim_params):
    """Closes all opened output files inside dictionary sim_params['outfiles']

    inputs:
    sim_params -- (dict) simulation parameters, includes dict of outfiles

    outputs:
    None
    """
    if sim_params['outfiles'] is not None:
        for outfile in sim_params['outfiles'].itervalues():
            outfile.close()

    return None
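A minimal numeric check of the field-energy term above, with a simple stand-in for the x variable (the real code passes grid-variable instances):

# WE = 1/2 * sum(E^2) * dx on a two-cell toy grid
import numpy as np
from types import SimpleNamespace

x = SimpleNamespace(width=0.5)          # hypothetical stand-in for the space variable
E = np.array([1.0, 2.0])
WE = 1/2. * np.sum(E**2) * x.width      # 0.5 * (1 + 4) * 0.5 = 1.25
print(WE)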
bfcab4cecd2a7d8e3946cf55d03659e839d25b3d
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/82/usersdata/165/44623/submittedfiles/decimal2bin.py
b07134dde6b2df7bd468626e44d12cc75e301ed4
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
153
py
# -*- coding: utf-8 -*-
# reads n and interprets its decimal digits as a binary number,
# e.g. n = 101 -> 1*4 + 0*2 + 1*1 = 5
n = int(input('enter n: '))
i = 0
soma = 0  # 'soma' = sum
while n > 0:
    resto = n % 10  # 'resto' = remainder, i.e. the current digit
    soma = soma + resto * (2**i)
    n = n // 10
    i = i + 1
print(soma)
9feacf0a85e2b4cb750a3f12f786d8971b96efc5
54f352a242a8ad6ff5516703e91da61e08d9a9e6
/Source Codes/AtCoder/arc042/B/4081354.py
8eea907c466a07c6b45bfcd05fcae80479294c1a
[]
no_license
Kawser-nerd/CLCDSA
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
aee32551795763b54acb26856ab239370cac4e75
refs/heads/master
2022-02-09T11:08:56.588303
2022-01-26T18:53:40
2022-01-26T18:53:40
211,783,197
23
9
null
null
null
null
UTF-8
Python
false
false
250
py
#!/usr/bin/env python3
p = complex(*list(map(int, input().split())))
N = int(input())
li = [complex(*list(map(int, input().split()))) for _ in range(N)]
li += [li[0]]
m = min(((p - a) / (b - a)).imag * abs(b - a) for a, b in zip(li, li[1:]))
print(m)
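# A minimal check of the distance formula used above (my illustration, not
# from the source): ((p - a) / (b - a)).imag is the signed perpendicular
# distance from p to the line through a and b, divided by |b - a|, so
# multiplying by abs(b - a) recovers the distance itself.
a, b, p = complex(0, 0), complex(4, 0), complex(2, 3)
d = ((p - a) / (b - a)).imag * abs(b - a)
print(d)  # 3.0, the distance from (2, 3) to the line through (0,0) and (4,0)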
79f62a7ee6eb1f0d6df192c475af8fec47ca39a9
ea5af064f6583c4dc244627f67bf51a9119347a9
/crypto.py
4c6a27ad97768b78070c68886cdd9f351d4f73f8
[]
no_license
celiyan/PyPassManager
034c10cfe594d365822dc836e0f0143e02ac25e3
fda994b44b7a003825e16bbcaffd07cf094e04b7
refs/heads/master
2022-12-19T19:51:29.714559
2020-10-15T05:16:37
2020-10-15T05:16:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,530
py
from Crypto.Cipher import AES
from os import urandom

def pad(txt):
    "AES CBC requires the number of plaintext bytes to be a multiple of 16, so we pad it to the nearest multiple. Takes&Returns bytes object."
    padding_length = AES.block_size - len(txt) % AES.block_size
    # we pad with a character = to the padding length, to make unpadding easy
    padding = chr(padding_length) * padding_length
    return txt + padding.encode()

def unpad(txt):
    "To get just the encrypted data back, we need to undo any meaningless padding we added to satisfy length requirements. Takes&Returns bytes object."
    padding_length = txt[-1]  # length is stored as the character code of the padding
    return txt[:-padding_length]

def encrypt(raw, key):
    "Encrypt bytes using AES CBC, and a random InitialVector that is stored at the start. Inputs two bytes objects: plaintext & key. Returns ciphertext as bytes object."
    iv = urandom(AES.block_size)
    key = key[:32]  # key must be 32 bytes, masterpass hash is 64 bytes
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return iv + cipher.encrypt(pad(raw))  # store iv so it can be decoded

def decrypt(data, key):
    "Decrypt bytes using AES CBC, extracting the InitialVector from the start. Inputs two bytes objects: ciphertext & key. Returns plaintext as bytes object."
    iv, data = data[:AES.block_size], data[AES.block_size:]  # extract the iv from the start
    key = key[:32]  # key must be 32 bytes, masterpass hash is 64 bytes
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(data))
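# A round-trip usage sketch (assumed, not part of the original file); it
# requires pycryptodome and a key of at least 32 bytes, e.g. a SHA-512
# digest as the comments above suggest:
import hashlib
key = hashlib.sha512(b"master password").digest()  # 64 bytes, truncated to 32 inside
ct = encrypt(b"secret data", key)
assert decrypt(ct, key) == b"secret data"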
8284303e2d78a6089a9fd4c7ccbb37454b2e67c4
503d2f8f5f5f547acb82f7299d86886691966ca5
/atcoder/abc200_c.py
e206350c17a0371913a9b0f7696b9550c9039895
[]
no_license
Hironobu-Kawaguchi/atcoder
3fcb649cb920dd837a1ced6713bbb939ecc090a9
df4b55cc7d557bf61607ffde8bda8655cf129017
refs/heads/master
2023-08-21T14:13:13.856604
2023-08-12T14:53:03
2023-08-12T14:53:03
197,216,790
0
0
null
null
null
null
UTF-8
Python
false
false
278
py
# https://atcoder.jp/contests/abc200/tasks/abc200_c
from collections import Counter

n = int(input())
a = list(map(int, input().split()))
for i in range(n):
    a[i] %= 200
cnt = Counter(a)
ans = 0
for i, v in cnt.items():
    if v >= 2:
        ans += v*(v-1) // 2
print(ans)
c344c8404ac954642b6f02f8f20bca296c731bae
5fc6b5a420b9cb2a7d5102df55b0b5248f8199e1
/pypykatz/commons/winapi/local/function_defs/live_reader_ctypes.py
aa2bae8f03b5ebc283d8a225b8ccda4bdf88894b
[ "MIT" ]
permissive
ASkyeye/pypykatz
8e1c598d57017fd400b9a8d830ed314be7562b96
8ad07f2f6f0c4904f9a77c711f693d6c794a7fb4
refs/heads/master
2021-07-03T13:48:34.350145
2020-11-14T22:50:30
2020-11-14T22:50:30
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,565
py
import os
import sys
import ctypes
import enum
import logging

from pypykatz import logger
from .ntdll import *
from .kernel32 import *
from .psapi import *

class WindowsMinBuild(enum.Enum):
    WIN_XP = 2500
    WIN_2K3 = 3000
    WIN_VISTA = 5000
    WIN_7 = 7000
    WIN_8 = 8000
    WIN_BLUE = 9400
    WIN_10 = 9800

#utter microsoft bullshit commencing..
def getWindowsBuild():
    class OSVersionInfo(ctypes.Structure):
        _fields_ = [
            ("dwOSVersionInfoSize", ctypes.c_int),
            ("dwMajorVersion", ctypes.c_int),
            ("dwMinorVersion", ctypes.c_int),
            ("dwBuildNumber", ctypes.c_int),
            ("dwPlatformId", ctypes.c_int),
            ("szCSDVersion", ctypes.c_char*128)]
    GetVersionEx = getattr(ctypes.windll.kernel32, "GetVersionExA")
    version = OSVersionInfo()
    version.dwOSVersionInfoSize = ctypes.sizeof(OSVersionInfo)
    GetVersionEx(ctypes.byref(version))
    return version.dwBuildNumber

DELETE = 0x00010000
READ_CONTROL = 0x00020000
WRITE_DAC = 0x00040000
WRITE_OWNER = 0x00080000
SYNCHRONIZE = 0x00100000
STANDARD_RIGHTS_REQUIRED = DELETE | READ_CONTROL | WRITE_DAC | WRITE_OWNER
STANDARD_RIGHTS_ALL = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE

if getWindowsBuild() >= WindowsMinBuild.WIN_VISTA.value:
    PROCESS_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF
else:
    PROCESS_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFF

PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010

#https://msdn.microsoft.com/en-us/library/windows/desktop/ms683217(v=vs.85).aspx
def enum_process_names():
    pid_to_name = {}

    for pid in EnumProcesses():
        if pid == 0:
            continue
        pid_to_name[pid] = 'Not found'
        try:
            process_handle = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, False, pid)
        except Exception as e:
            continue

        pid_to_name[pid] = QueryFullProcessImageNameW(process_handle)

    return pid_to_name

def get_lsass_pid():
    pid_to_name = enum_process_names()
    for pid in pid_to_name:
        if pid_to_name[pid].lower().find('lsass.exe') != -1:
            return pid

    raise Exception('Failed to find lsass.exe')

def enum_lsass_handles():
    #searches for open LSASS process handles in all processes
    # you should be having SE_DEBUG enabled at this point
    RtlAdjustPrivilege(20)

    lsass_handles = []
    sysinfohandles = NtQuerySystemInformation(16)
    for pid in sysinfohandles:
        if pid == 4:
            continue
        #if pid != GetCurrentProcessId():
        #	continue
        for syshandle in sysinfohandles[pid]:
            #print(pid)
            try:
                pHandle = OpenProcess(PROCESS_DUP_HANDLE, False, pid)
            except Exception as e:
                logger.debug('Error opening process %s Reason: %s' % (pid, e))
                continue

            try:
                dupHandle = NtDuplicateObject(pHandle, syshandle.Handle, GetCurrentProcess(), PROCESS_QUERY_INFORMATION|PROCESS_VM_READ)
                #print(dupHandle)
            except Exception as e:
                logger.debug('Failed to duplicate object! PID: %s HANDLE: %s' % (pid, hex(syshandle.Handle)))
                continue

            oinfo = NtQueryObject(dupHandle, ObjectTypeInformation)
            if oinfo.Name.getString() == 'Process':
                try:
                    pname = QueryFullProcessImageNameW(dupHandle)
                    if pname.lower().find('lsass.exe') != -1:
                        logger.info('Found open handle to lsass! PID: %s HANDLE: %s' % (pid, hex(syshandle.Handle)))
                        #print('%s : %s' % (pid, pname))
                        lsass_handles.append((pid, dupHandle))
                except Exception as e:
                    logger.debug('Failed to obtain the path of the process! PID: %s' % pid)
                    continue
    return lsass_handles
84be026c4a9decd8c8cbeb0044e6269de46348c9
c383840367c09a4aa3762d224b17b742fe53eb31
/GANs_Advanced/DiscoGAN/train_DiscoGAN_org.py
081a29ab949a0e8e7a706e48f2d192a1060b2e74
[]
no_license
qzq2514/GAN
04f3f1ff6437d6805369f28b207a8f726a112d11
a313deb08884c2ce60d4fc3834b79a8518e38f44
refs/heads/master
2020-09-21T17:32:00.913453
2020-01-17T05:02:18
2020-01-17T05:02:18
224,866,070
1
0
null
null
null
null
UTF-8
Python
false
false
5,800
py
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util
import tensorflow.contrib.slim as slim
from DataLoader import Pix2Pix_loader
from net.DiscoGAN import DiscoGAN
import tensorflow as tf
import numpy as np
import scipy.misc
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '1'

image_height = 64
image_width = 64
batch_size = 64
sample_num = 10
Train_Step = 30005
starting_rate = 0.01
change_rate = 0.5
learning_rate = 0.0002

# read paired data that has not been split into separate A/B sets
image_dir = "/media/cgim/data/GAN/data/edges2shoes/"

model_name = "DiscoGAN_1227"
model_path = "/media/cgim/dataset/models/" + model_name

pb_path = os.path.join(model_path, "pb/")
ckpt_path = os.path.join(model_path, "ckpt/")
result_dir = model_path + "/result"
if not os.path.exists(result_dir):
    os.makedirs(result_dir)
if not os.path.exists(pb_path):
    os.makedirs(pb_path)
if not os.path.exists(ckpt_path):
    os.makedirs(ckpt_path)

def train():
    input_A_place = tf.placeholder(tf.float32, shape=[None, image_height, image_width, 3], name="input_A")
    input_B_place = tf.placeholder(tf.float32, shape=[None, image_height, image_width, 3], name="input_B")
    is_training_place = tf.placeholder_with_default(False, shape=(), name="is_training")
    reconst_rate_place = tf.placeholder(tf.float32, shape=(), name="reconst_rate")

    discoGan = DiscoGAN(is_training_place, reconst_rate_place)
    G_loss, D_loss = discoGan.build_DiscoGAN(input_A_place, input_B_place)
    g_vars, d_vars = discoGan.get_vars()

    global_step = tf.Variable(-1, trainable=False, name="global_step")
    global_step_increase = tf.assign(global_step, tf.add(global_step, 1))

    train_op_D = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(D_loss, var_list=d_vars)
    train_op_G = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(G_loss, var_list=g_vars)

    A2B_out, ABA_out = discoGan.sample_generate(input_A_place, "A2B")
    A2B_output = tf.identity(A2B_out, name="A2B_output")
    B2A_out, BAB_out = discoGan.sample_generate(input_B_place, "B2A")
    B2A_output = tf.identity(B2A_out, name="B2A_output")

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(ckpt_path)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(ckpt_path, ckpt_name))

        _global_step = sess.run(global_step_increase)
        dataLoader = Pix2Pix_loader(image_dir, image_height, image_width,
                                    batch_size=batch_size, global_step=_global_step)
        while _global_step < Train_Step:
            if _global_step < 10000:
                reconst_rate = starting_rate
            else:
                reconst_rate = change_rate
            images_A, images_B = dataLoader.next_batch()  # 0~255
            feed_dict = {input_A_place: images_A, input_B_place: images_B,
                         is_training_place: True, reconst_rate_place: reconst_rate}
            if _global_step % 2 == 0:
                sess.run(train_op_D, feed_dict=feed_dict)
            sess.run(train_op_G, feed_dict=feed_dict)

            _global_step, _D_loss, _G_loss = sess.run([global_step, D_loss, G_loss], feed_dict=feed_dict)
            if _global_step % 50 == 0:
                print("Step:{},Reconst_rate:{},D_loss:{},G_loss:{}".format(
                    _global_step, reconst_rate, _D_loss, _G_loss,))
            if _global_step % 100 == 0:
                test_images_A, test_images_B = dataLoader.random_next_test_batch()

                # save results from A to B
                _A2B_output, _ABA_out = sess.run([A2B_output, ABA_out],
                                                 feed_dict={input_A_place: test_images_A})
                _A2B_output = (_A2B_output + 1) / 2 * 255.0
                _ABA_out = (_ABA_out + 1) / 2 * 255.0
                for ind, trg_image in enumerate(_A2B_output[:sample_num]):
                    scipy.misc.imsave(result_dir + "/{}_{}_A.jpg".format(_global_step, ind), test_images_A[ind])
                    scipy.misc.imsave(result_dir + "/{}_{}_A2B.jpg".format(_global_step, ind), _A2B_output[ind])
                    scipy.misc.imsave(result_dir + "/{}_{}_ABA.jpg".format(_global_step, ind), _ABA_out[ind])

                # save results from B to A
                _B2A_output, _BAB_out = sess.run([B2A_output, BAB_out],
                                                 feed_dict={input_B_place: test_images_B})
                _B2A_output = (_B2A_output + 1) / 2 * 255.0
                _BAB_out = (_BAB_out + 1) / 2 * 255.0
                for ind, trg_image in enumerate(_B2A_output[:sample_num]):
                    scipy.misc.imsave(result_dir + "/{}_{}_B.jpg".format(_global_step, ind), test_images_B[ind])
                    scipy.misc.imsave(result_dir + "/{}_{}_B2A.jpg".format(_global_step, ind), _B2A_output[ind])
                    scipy.misc.imsave(result_dir + "/{}_{}_BAB.jpg".format(_global_step, ind), _BAB_out[ind])

            if _global_step == Train_Step - 5:
                # save the frozen graph as a PB file
                constant_graph = graph_util.convert_variables_to_constants(
                    sess, sess.graph_def, ["A2B_output", "B2A_output"])
                save_model_name = model_name + "-" + str(_global_step) + ".pb"
                with tf.gfile.FastGFile(pb_path + save_model_name, mode="wb") as fw:
                    fw.write(constant_graph.SerializeToString())
                # save a CKPT checkpoint
                saver.save(sess, ckpt_path + model_name + ".ckpt", global_step=_global_step)
                print("Successfully saved model {}".format(save_model_name))
                return
            _global_step = sess.run(global_step_increase)

if __name__ == '__main__':
    train()
d235aec102d27ca4fae3b8e5d215f502675ae6fb
17c366bf8aa9fed59fb3d91db06142860cb9ce38
/nbs/examples/mnist_blocks.py
14043be821f6c97c3bf782edb3b9b4b097f38029
[ "Apache-2.0" ]
permissive
dienhoa/fastai
3f4884f9fb96f9e5199e33b959478dfa0bbfa0d4
fdce0330e05ae02db90c3456f9fc2827c3cf86a0
refs/heads/master
2022-04-14T06:27:52.994595
2022-04-13T21:24:27
2022-04-13T21:24:27
154,803,492
0
0
Apache-2.0
2018-10-26T08:38:44
2018-10-26T08:38:43
null
UTF-8
Python
false
false
422
py
from fastai.vision.all import *

splitter = GrandparentSplitter(train_name='training', valid_name='testing')

mnist = DataBlock(blocks=(ImageBlock(PILImageBW), CategoryBlock),
                  get_items=get_image_files,
                  splitter=splitter,
                  get_y=parent_label)

if __name__ == '__main__':
    data = mnist.dataloaders(untar_data(URLs.MNIST), bs=256)
    learn = cnn_learner(data, resnet18)
    learn.fit_one_cycle(1, 1e-2)
f178b663d0ee93882d7f0f23f79762c86c9a62b3
d3efc82dfa61fb82e47c82d52c838b38b076084c
/Autocase_Result/ReverseRepo/YW_NHG_SHHG_019_GC028.py
697b950c9b5b4c9f6d0da0feb24a47bcfb16928d
[]
no_license
nantongzyg/xtp_test
58ce9f328f62a3ea5904e6ed907a169ef2df9258
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
refs/heads/master
2022-11-30T08:57:45.345460
2020-07-30T01:43:30
2020-07-30T01:43:30
280,388,441
0
0
null
null
null
null
UTF-8
Python
false
false
3,026
py
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg

class YW_NHG_SHHG_019_GC028(xtp_test_case):
    # YW_NHG_SHHG_019_GC028
    def test_YW_NHG_SHHG_019_GC028(self):
        # title: Shanghai reverse repo -- quantity equal to 1,000,000 lots -- 28-day term
        title = '上海逆回购--数量(等于100万张)-28天'

        # Define the expected values for this test case.
        # Expected states: initial, unfilled, partially filled, fully filled,
        # partial-cancel reported, partially cancelled, reported and pending cancel,
        # cancelled, rejected, cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and do not need to be changed.
        # (The Chinese dict keys are identifiers consumed by other modules
        # and are kept as-is.)
        case_goal = {
            '期望状态': '全成',        # expected state: fully filled
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',      # whether an order report is generated: yes
            '是否是撤废': '否',        # whether this is a cancel-reject: no
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)

        # Define the order parameters ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status,
        # side (B buy / S sell), expected state, Api
        stkparm = QueryStkPriceQty('204028', '1', '12', '2', '0', 'S',
                                   case_goal['期望状态'], Api)

        # If fetching the order parameters fails, the test case fails
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],   # test result
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],  # failed to get order parameters + reason
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_REPO'],
                'order_client_id': 2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],       # ticker
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': stkparm['随机中间价'],      # random mid price
                'quantity': 1000000,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            # log: execution result, error source, error reason
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0

if __name__ == '__main__':
    unittest.main()
e3890f86efe95e867f60a04ad1fb1640b5b9c625
6a253ee7b47c5f70c826bbc97bb8e33cd1dab3b6
/4.Working with Dask Bags for Unstructured Data/Filtering vetoed bills.py
f6f1b993c692dc6f8cda3afb05d26a40595ed1aa
[]
no_license
Mat4wrk/Parallel-Programming-with-Dask-in-Python-Datacamp
19a646d6d16ff46173964c25639ff923407c8f32
535f69b78adb50cffc7f402f81ddff19f853eea1
refs/heads/main
2023-03-06T19:52:39.495066
2021-02-13T13:27:06
2021-02-13T13:27:06
338,565,569
2
0
null
null
null
null
UTF-8
Python
false
false
279
py
# Filter the bills: overridden
overridden = bills_dicts.filter(veto_override)

# Print the number of bills retained
print(overridden.count().compute())

# Get the value of the 'title' key
titles = overridden.pluck('title')

# Compute and print the titles
print(titles.compute())
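# A minimal sketch of the pieces this exercise assumes are already defined.
# The file pattern and the status value checked by veto_override are my
# assumptions, not from the source:
import json
import dask.bag as db

def veto_override(bill):
    """Predicate: keep bills whose veto was overridden."""
    return bill.get('current_status') == 'enacted_veto_override'

# each line of a JSON-lines file becomes one bill dict
bills_dicts = db.read_text('bills*.json').map(json.loads)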
ccd7c753cf3f1a7e04ca7b256c5f92fffcc69c25
3b2e30a6f082b4b21818eae44ea2f55fc25e7aa2
/project/cart/views.py
51a36c3411656d1a5ebb2b1e76ab2d20290d4d53
[]
no_license
alekseykonotop/online_store_django
d9e9941ddedd783b38b5592ab2a3af5e35f0c2ee
183cb3680b5b8f90457ea144dafaa96c13a3433d
refs/heads/master
2020-07-30T09:13:57.449081
2019-11-07T19:46:58
2019-11-07T19:46:58
210,168,644
0
0
null
2020-06-05T23:07:09
2019-09-22T15:19:34
JavaScript
UTF-8
Python
false
false
942
py
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from store.models import Product, Category
from .cart import Cart
from .forms import CartAddProductForm


@require_POST
def cart_add(request, product_id):
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductForm(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        cart.add(product=product,
                 quantity=cd['quantity'],
                 update_quantity=cd['update'])
    return redirect('cart:cart_detail')


def cart_remove(request, product_id):
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    cart.remove(product)
    return redirect('cart:cart_detail')


def cart_detail(request):
    context = {}
    context['cart'] = Cart(request)
    return render(request, 'cart/detail.html', context)
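# A hypothetical urls.py wiring for these views. The 'cart' namespace and the
# 'cart_detail' name are implied by the redirects above; the other route names
# are assumptions:
from django.urls import path
from . import views

app_name = 'cart'

urlpatterns = [
    path('', views.cart_detail, name='cart_detail'),
    path('add/<int:product_id>/', views.cart_add, name='cart_add'),
    path('remove/<int:product_id>/', views.cart_remove, name='cart_remove'),
]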
c8214a41a82f875f402de97e2db11c439208e33c
cf2ec51dfcb2d6777b5045137d2bcfe62afdec8c
/upvcarshare/core/templatetags/core_tags.py
9e4b4acedbe5f435252e61b22be188f25d1f1041
[]
no_license
morrme/upvcarshare
c4b8b1587370e7931d8b5d6c78b948188617795c
189c91c608d0b61f6b68ef5c49a2546fdbbe38a2
refs/heads/master
2021-01-22T22:07:52.611880
2017-05-29T14:57:36
2017-05-29T14:57:36
88,732,669
0
0
null
2017-04-19T10:33:58
2017-04-19T10:33:58
null
UTF-8
Python
false
false
1,295
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import

from django import template
from django.conf import settings
from django.http import QueryDict

from journeys import DEFAULT_GOOGLE_MAPS_SRID
from journeys.helpers import make_point

register = template.Library()


@register.simple_tag
def google_static_map(point, width=600, height=300, zoom=13):
    google_maps_point = make_point(point, origin_coord_srid=point.srid,
                                   destiny_coord_srid=DEFAULT_GOOGLE_MAPS_SRID)
    base_uri = "https://maps.googleapis.com/maps/api/staticmap"
    args = {
        "maptype": "roadmap",
        "zoom": zoom,
        "size": "{}x{}".format(width, height),
        "key": settings.GOOGLE_MAPS_API_KEY,
        "center": "{},{}".format(google_maps_point.coords[1], google_maps_point.coords[0]),
        "markers": "color:red|{},{}".format(google_maps_point.coords[1], google_maps_point.coords[0]),
    }
    query_dict = QueryDict(mutable=True)
    query_dict.update(args)
    return "{}?{}".format(base_uri, query_dict.urlencode())


@register.simple_tag(takes_context=True)
def add_active_class(context, names, _class="active"):
    request = context["request"]
    names = names.split(",")
    return _class if request.resolver_match.view_name in names else ""
26cb68b12f6852ef885417963ed3f227dde4232b
ad6681ec221fddc78956d45182f22bd8f1aae8e1
/基础班/python基础班作业/zuoye5.py
d7778e78f4774e78cf7432ba9bdc60433604db33
[]
no_license
caoxp930/MyPythonCode
cb2428fd7078100df0b118f64713b7db76fe1e23
6b7e17b23fbaddcc69812ba7a14a0a5ad548ad4b
refs/heads/master
2023-03-15T01:22:17.847582
2021-03-02T12:37:09
2021-03-02T12:37:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
151
py
# -*- coding: utf-8 -*-
for i in range(1, 10):
    for j in range(1, i+1):
        print(j, '*', i, '=', i*j, end='\t')
        if i == j:
            print()
774d14b2179139ab271f99c788c217d85202583e
f61db5940e29773aba8fc342a21de00e91a5ab2e
/base/day9/02python操作文件.py
a7f6d0023d40ea4faa5dadda9bbcdb01e1cb4462
[]
no_license
liyaozr/project
c17a9dcbcda38fe9a15ec4c41a01242a13695991
0b0fc10e267ceb19f6792b490fede177035459fe
refs/heads/master
2020-11-29T18:38:03.297369
2020-03-10T01:11:00
2020-03-10T01:11:00
230,190,916
0
0
null
null
null
null
UTF-8
Python
false
false
1,339
py
""" ============================ Author:柠檬班-木森 Time:2020/1/13 20:28 E-mail:[email protected] Company:湖南零檬信息技术有限公司 ============================ """ """ python操作文件 open的常用参数: 第一个:要打开的文件名字或者文件路径 第二个参数:文件打开的模式 r:只读模式 rb:只读模式,以二进制的编码格式去打开文件 第三个参数: encoding:用来指定打开文件的编码格式(使用rb的时候,不需要加该参数) """ # 读取同级目录下的文件,可以直接写文件名 # 打开文件 # f = open("01内置函数的补充.py", "r", encoding="utf8") # # 读取不在同一个目录下的文件,要写上文件的完整路径 # f = open(r"C:\project\py26_project\py26_01day\02python中的数值.py", "r", encoding="utf8") # # # 读取内容 # content = f.read() # # # 打印读取出来的内容 # print("文件中读取出来的内容为:", content) # # # 关闭文件 # f.close() # -------------------如何去读取图片、视频等文件---------------- # 读取不在同一个目录下的文件,要写上文件的完整路径 f = open("bj2.png", "rb") # 读取内容 content = f.read() # 打印读取出来的内容 print("文件中读取出来的内容为:", content) # 关闭文件 f.close()
de1275ebc2f6aa4b9161b36c637abba3cfb8339b
055b7c4c2118e6e862cfae344d722e8e90534cb4
/config.py
5aa53ff4827b52082755f58b81f4fb855ebf1ae7
[]
no_license
Omulosi/iReporter
745b3194f5a06371ca01c4d790cac763a09cf89f
db80d76b84d786330fb389d94c2623cbbad13be9
refs/heads/develop
2022-12-09T13:42:32.856875
2019-04-23T04:14:27
2019-04-23T04:14:27
158,638,861
0
0
null
null
null
null
UTF-8
Python
false
false
1,162
py
'''
instance.config
------------------

This module provides default configuration values.
'''

import os
from datetime import timedelta
from dotenv import load_dotenv

basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))


class Config:
    '''
    Base configuration values
    '''
    SECRET_KEY = os.environ.get('SECRET_KEY')
    JWT_SECRET_KEY = os.environ.get('JWT_SECRET_KEY')
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(minutes=60)
    JWT_BLACKLIST_ENABLED = True
    JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']
    PROPAGATE_EXCEPTIONS = True
    #: Database url
    DATABASE = os.environ.get('DATABASE_URL')
    #: Mail server configuration values
    MAIL_SERVER = os.environ.get('MAIL_SERVER')
    MAIL_PORT = os.environ.get('MAIL_PORT')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS')
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')


class TestConfig(Config):
    '''
    configuration values for testing
    '''
    TESTING = True
    DEBUG = True
    PROPAGATE_EXCEPTIONS = True
    DATABASE = os.environ.get('TEST_DB_URL')
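# A hypothetical sketch of how these classes would be consumed by a Flask app
# factory (create_app is an assumption, not shown in this file):
from flask import Flask

def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(config_class)  # reads the UPPERCASE attributes
    return app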
2355da4fe0a15ebbd2427a4c7f7b891e2e2ad149
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
/PyTorch/contrib/cv/pose_estimation/Hourglass_for_PyTorch/mmpose-master/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1x_coco.py
4fb90266f00299d6ac45e49f928e81c2c3eb7535
[ "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later" ]
permissive
Ascend/ModelZoo-PyTorch
4c89414b9e2582cef9926d4670108a090c839d2d
92acc188d3a0f634de58463b6676e70df83ef808
refs/heads/master
2023-07-19T12:40:00.512853
2023-07-17T02:48:18
2023-07-17T02:48:18
483,502,469
23
6
Apache-2.0
2022-10-15T09:29:12
2022-04-20T04:11:18
Python
UTF-8
Python
false
false
6,403
py
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
total_epochs = 12

model = dict(
    type='FasterRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            match_low_quality=True,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            match_low_quality=False,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100)
    # soft-nms is also supported for rcnn testing
    # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
)
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
bc3798906716aa41be8beb4ecc2a2c58459a8f86
dc767b48d46e2f6b9851ce61914e880fc95fe520
/myshop/shop/migrations/0001_initial.py
cca6008d38c1c259f458a69c4f61f46f334c2252
[]
no_license
EdmilsonSantana/django-by-example
c06081a1a3915aaf3996d017fea91c8273cbe2e0
7c895b55b8f6fcc05a2d5cd2181bf207dc9256fc
refs/heads/master
2021-01-12T02:58:49.261515
2017-02-28T20:05:29
2017-02-28T20:05:29
78,144,090
0
0
null
null
null
null
UTF-8
Python
false
false
2,044
py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-16 23:59
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=200)),
                ('slug', models.SlugField(max_length=200, unique=True)),
            ],
            options={
                'verbose_name': 'category',
                'verbose_name_plural': 'categories',
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=200)),
                ('slug', models.SlugField(max_length=200)),
                ('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
                ('description', models.TextField(blank=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('stock', models.PositiveIntegerField()),
                ('available', models.BooleanField(default=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.Category')),
            ],
            options={
                'ordering': ('-created',),
            },
        ),
        migrations.AlterIndexTogether(
            name='product',
            index_together=set([('id', 'slug')]),
        ),
    ]
78a758b50b7c3ecb4bb6e5761d61565d2eb317a5
2c5b25d0b5d6ba66d013251f93ebf4c642fd787b
/wrong_answer_codes/Contiguous_Array/Contiguous Array_324757576.py
1c620fdc45f25037006caf70d00f3c54a4797b19
[]
no_license
abhinay-b/Leetcode-Submissions
da8099ac54b5d36ae23db42580064d0f9d9bc63b
d034705813f3f908f555f1d1677b827af751bf42
refs/heads/master
2022-10-15T22:09:36.328967
2020-06-14T15:39:17
2020-06-14T15:39:17
259,984,100
2
0
null
null
null
null
UTF-8
Python
false
false
787
py
from typing import List  # needed for the annotation below


class Solution:
    def findMaxLength(self, nums: List[int]) -> int:
        count = [0]*2
        start = end = 0
        maxVal = 0
        for idx, num in enumerate(nums):
            count[num] += 1
            diff = abs(count[0] - count[1])
            # print(diff, start, end)
            if diff > 1:
                count[nums[start]] -= 1
                start += 1
            elif diff == 1 and start > 0 and (count[nums[start-1]] + 1 == count[1-nums[start-1]]):
                start -= 1
                count[nums[start]] += 1
                end = idx
                maxVal = max(maxVal, end - start + 1)
            elif not diff:
                end = idx
                maxVal = max(maxVal, end - start + 1)
        return maxVal
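# This file sits in a wrong-answer folder: the sliding-window idea above fails
# because a balanced subarray cannot always be maintained by shrinking one end.
# For contrast, the standard accepted approach (not the author's submission):
# treat 0 as -1 and record the first index at which each running sum appears;
# equal sums at indices i and j mean nums[i+1:j+1] has as many 0s as 1s.
class PrefixSumSolution:
    def findMaxLength(self, nums: List[int]) -> int:
        first_seen = {0: -1}
        running = best = 0
        for idx, num in enumerate(nums):
            running += 1 if num else -1
            if running in first_seen:
                best = max(best, idx - first_seen[running])
            else:
                first_seen[running] = idx
        return best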
edcb724454b921fe8dc091a316470e10f89459df
6cea6b8cfeef78b433e296c38ef11f4637609f20
/src/collectors/ipmisensor/test/testipmisensor.py
66a79164c5d9b0f45141583e0676c31a4b5b8902
[ "MIT" ]
permissive
philipcristiano/Diamond
b659d577ec054c06ab99308d6c2ba3163de84e1a
577270ea820af597458aa5d3325367608cd37845
refs/heads/master
2021-01-18T10:04:59.057835
2012-08-02T04:08:02
2012-08-02T04:08:02
3,140,864
0
0
null
null
null
null
UTF-8
Python
false
false
2,392
py
#!/usr/bin/python
################################################################################

from test import *

from diamond.collector import Collector
from ipmisensor import IPMISensorCollector

################################################################################

class TestIPMISensorCollector(CollectorTestCase):
    def setUp(self):
        config = get_collector_config('IPMISensorCollector', {
            'interval': 10,
            'bin':      'true',
            'use_sudo': False
        })
        self.collector = IPMISensorCollector(config, None)

    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        with patch('subprocess.Popen.communicate',
                   Mock(return_value=(self.getFixture('ipmitool.out').getvalue(), ''))):
            self.collector.collect()

        self.assertPublishedMany(publish_mock, {
            'System.Temp':     32.000000,
            'CPU1.Vcore':      1.080000,
            'CPU2.Vcore':      1.000000,
            'CPU1.VTT':        1.120000,
            'CPU2.VTT':        1.176000,
            'CPU1.DIMM':       1.512000,
            'CPU2.DIMM':       1.512000,
            '+1_5V':           1.512000,
            '+1_8V':           1.824000,
            '+5V':             4.992000,
            '+12V':            12.031000,
            '+1_1V':           1.112000,
            '+3_3V':           3.288000,
            '+3_3VSB':         3.240000,
            'VBAT':            3.240000,
            'Fan1':            4185.000000,
            'Fan2':            4185.000000,
            'Fan3':            4185.000000,
            'Fan7':            3915.000000,
            'Fan8':            3915.000000,
            'Intrusion':       0.000000,
            'PS.Status':       0.000000,
            'P1-DIMM1A.Temp':  41.000000,
            'P1-DIMM1B.Temp':  39.000000,
            'P1-DIMM2A.Temp':  38.000000,
            'P1-DIMM2B.Temp':  40.000000,
            'P1-DIMM3A.Temp':  37.000000,
            'P1-DIMM3B.Temp':  38.000000,
            'P2-DIMM1A.Temp':  39.000000,
            'P2-DIMM1B.Temp':  38.000000,
            'P2-DIMM2A.Temp':  39.000000,
            'P2-DIMM2B.Temp':  39.000000,
            'P2-DIMM3A.Temp':  39.000000,
            'P2-DIMM3B.Temp':  40.000000,
        })

################################################################################
if __name__ == "__main__":
    unittest.main()
a4bcbc3ea13c6d7161096668057371a82bc97ec8
e7ea544475ebfa70ebdf5d5949bde9e23edc60ba
/gbp/scripts/common/buildpackage.py
e1edfb29587dfad1895660c095e2fe13141cba7b
[]
no_license
dcoshea/git-buildpackage
80cb7d890222488663a09e3d790fc5e985f791b9
f4aa76bfcda1ded4649cd071b123ef8d7bf2344d
refs/heads/master
2020-05-26T21:05:37.574986
2017-02-19T13:17:11
2017-02-19T13:17:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,061
py
# vim: set fileencoding=utf-8 :
#
# (C) 2006-2011, 2016 Guido Guenther <[email protected]>
# (C) 2012 Intel Corporation <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
#
"""Common functionality for Debian and RPM buildpackage scripts"""

import os
import os.path
import pipes
import tempfile
import shutil

from gbp.command_wrappers import (CatenateTarArchive, CatenateZipArchive)
from gbp.git import GitRepositoryError
from gbp.errors import GbpError
import gbp.log

# when we want to reference the index in a treeish context we call it:
index_name = "INDEX"
# when we want to reference the working copy in treeish context we call it:
wc_name = "WC"


def sanitize_prefix(prefix):
    """
    Sanitize the prefix used for generating source archives

    >>> sanitize_prefix('')
    '/'
    >>> sanitize_prefix('foo/')
    'foo/'
    >>> sanitize_prefix('/foo/bar')
    'foo/bar/'
    """
    if prefix:
        return prefix.strip('/') + '/'
    return '/'


def git_archive_submodules(repo, treeish, output, prefix, comp_type,
                           comp_level, comp_opts, format='tar'):
    """
    Create a source tree archive with submodules.

    Concatenates the archives generated by git-archive into one and compresses
    the end result.

    Exception handling is left to the caller.
    """
    prefix = sanitize_prefix(prefix)
    tempdir = tempfile.mkdtemp()
    main_archive = os.path.join(tempdir, "main.%s" % format)
    submodule_archive = os.path.join(tempdir, "submodule.%s" % format)
    try:
        # generate main (tmp) archive
        repo.archive(format=format, prefix=prefix,
                     output=main_archive, treeish=treeish)

        # generate each submodule's archive and append it to the main archive
        for (subdir, commit) in repo.get_submodules(treeish):
            tarpath = [subdir, subdir[2:]][subdir.startswith("./")]

            gbp.log.debug("Processing submodule %s (%s)" % (subdir, commit[0:8]))
            repo.archive(format=format, prefix='%s%s/' % (prefix, tarpath),
                         output=submodule_archive, treeish=commit, cwd=subdir)
            if format == 'tar':
                CatenateTarArchive(main_archive)(submodule_archive)
            elif format == 'zip':
                CatenateZipArchive(main_archive)(submodule_archive)

        # compress the output
        if comp_type:
            # Redirect through stdout directly to the correct output file in
            # order to avoid determining the output filename of the compressor
            try:
                comp_level_opt = '-%d' % comp_level if comp_level is not None else ''
            except TypeError:
                raise GbpError("Invalid compression level '%s'" % comp_level)
            ret = os.system("%s --stdout %s %s %s > %s" %
                            (comp_type, comp_level_opt, comp_opts,
                             main_archive, output))
            if ret:
                raise GbpError("Error creating %s: %d" % (output, ret))
        else:
            shutil.move(main_archive, output)
    finally:
        shutil.rmtree(tempdir)


def git_archive_single(treeish, output, prefix, comp_type, comp_level,
                       comp_opts, format='tar'):
    """
    Create an archive without submodules

    Exception handling is left to the caller.
    """
    prefix = sanitize_prefix(prefix)
    pipe = pipes.Template()
    pipe.prepend("git archive --format=%s --prefix=%s %s" %
                 (format, prefix, treeish), '.-')
    try:
        comp_level_opt = '-%d' % comp_level if comp_level is not None else ''
    except TypeError:
        raise GbpError("Invalid compression level '%s'" % comp_level)
    if comp_type:
        pipe.append('%s -c %s %s' % (comp_type, comp_level_opt, comp_opts), '--')
    ret = pipe.copy('', output)
    if ret:
        raise GbpError("Error creating %s: %d" % (output, ret))


# Functions to handle export-dir
def dump_tree(repo, export_dir, treeish, with_submodules, recursive=True):
    "dump a tree to output_dir"
    output_dir = os.path.dirname(export_dir)
    prefix = sanitize_prefix(os.path.basename(export_dir))
    if recursive:
        paths = []
    else:
        paths = ["'%s'" % nam for _mod, typ, _sha, nam in
                 repo.list_tree(treeish) if typ == 'blob']

    pipe = pipes.Template()
    pipe.prepend('git archive --format=tar --prefix=%s %s -- %s' %
                 (prefix, treeish, ' '.join(paths)), '.-')
    pipe.append('tar -C %s -xf -' % output_dir, '-.')
    top = os.path.abspath(os.path.curdir)
    try:
        ret = pipe.copy('', '')
        if ret:
            raise GbpError("Error in dump_tree archive pipe")

        if recursive and with_submodules:
            if repo.has_submodules():
                repo.update_submodules()
            for (subdir, commit) in repo.get_submodules(treeish):
                gbp.log.info("Processing submodule %s (%s)" % (subdir, commit[0:8]))
                tarpath = [subdir, subdir[2:]][subdir.startswith("./")]
                os.chdir(subdir)
                pipe = pipes.Template()
                pipe.prepend('git archive --format=tar --prefix=%s%s/ %s' %
                             (prefix, tarpath, commit), '.-')
                pipe.append('tar -C %s -xf -' % output_dir, '-.')
                ret = pipe.copy('', '')
                os.chdir(top)
                if ret:
                    raise GbpError("Error in dump_tree archive pipe in submodule %s" % subdir)
    except OSError as err:
        gbp.log.err("Error dumping tree to %s: %s" % (output_dir, err[0]))
        return False
    except (GitRepositoryError, GbpError) as err:
        gbp.log.err(err)
        return False
    except Exception as e:
        gbp.log.err("Error dumping tree to %s: %s" % (output_dir, e))
        return False
    finally:
        os.chdir(top)
    return True


def wc_index(repo):
    """Get path of the temporary index file used for exporting working copy"""
    return os.path.join(repo.git_dir, "gbp_index")


def write_wc(repo, force=True):
    """write out the current working copy as a treeish object"""
    index_file = wc_index(repo)
    repo.add_files(repo.path, force=force, index_file=index_file)
    tree = repo.write_tree(index_file=index_file)
    return tree


def drop_index(repo):
    """drop our custom index"""
    index_file = wc_index(repo)
    if os.path.exists(index_file):
        os.unlink(index_file)
a46afda8041485109144a60243600a990bd2b7d1
c0d5b7f8e48a26c6ddc63c76c43ab5b397c00028
/tests/columns/test_array.py
731e15ff8b962d66534e989094fe5f8cbef23a93
[ "MIT" ]
permissive
aminalaee/piccolo
f6c5e5e1c128568f7ccb9ad1dfb4746acedae262
af8d2d45294dcd84f4f9b6028752aa45b699ec15
refs/heads/master
2023-07-14T09:44:04.160116
2021-07-11T22:56:27
2021-07-11T22:56:27
386,398,401
0
0
MIT
2021-07-15T19:32:50
2021-07-15T19:08:17
null
UTF-8
Python
false
false
2,199
py
from unittest import TestCase

from piccolo.table import Table
from piccolo.columns.column_types import Array, Integer
from tests.base import postgres_only


class MyTable(Table):
    value = Array(base_column=Integer())


class TestArrayPostgres(TestCase):
    """
    Make sure an Array column can be created.
    """

    def setUp(self):
        MyTable.create_table().run_sync()

    def tearDown(self):
        MyTable.alter().drop_table().run_sync()

    def test_storage(self):
        """
        Make sure data can be stored and retrieved.
        """
        MyTable(value=[1, 2, 3]).save().run_sync()

        row = MyTable.objects().first().run_sync()
        self.assertEqual(row.value, [1, 2, 3])

    @postgres_only
    def test_index(self):
        """
        Indexes should allow individual array elements to be queried.
        """
        MyTable(value=[1, 2, 3]).save().run_sync()

        self.assertEqual(
            MyTable.select(MyTable.value[0]).first().run_sync(), {"value": 1}
        )

    @postgres_only
    def test_all(self):
        """
        Make sure rows can be retrieved where all items in an array match a
        given value.
        """
        MyTable(value=[1, 1, 1]).save().run_sync()

        self.assertEqual(
            MyTable.select(MyTable.value)
            .where(MyTable.value.all(1))
            .first()
            .run_sync(),
            {"value": [1, 1, 1]},
        )

        self.assertEqual(
            MyTable.select(MyTable.value)
            .where(MyTable.value.all(0))
            .first()
            .run_sync(),
            None,
        )

    def test_any(self):
        """
        Make sure rows can be retrieved where any items in an array match a
        given value.
        """
        MyTable(value=[1, 2, 3]).save().run_sync()

        self.assertEqual(
            MyTable.select(MyTable.value)
            .where(MyTable.value.any(1))
            .first()
            .run_sync(),
            {"value": [1, 2, 3]},
        )

        self.assertEqual(
            MyTable.select(MyTable.value)
            .where(MyTable.value.any(0))
            .first()
            .run_sync(),
            None,
        )
3f259779a113f38727e5e331c041593a3830edfe
caaf56727714f8c03be38710bc7d0434c3ec5b11
/tests/components/telegram/test_notify.py
7488db49d9ea58db8f78e93cab0842fa686ee119
[ "Apache-2.0" ]
permissive
tchellomello/home-assistant
c8db86880619d7467901fd145f27e0f2f1a79acc
ed4ab403deaed9e8c95e0db728477fcb012bf4fa
refs/heads/dev
2023-01-27T23:48:17.550374
2020-09-18T01:18:55
2020-09-18T01:18:55
62,690,461
8
1
Apache-2.0
2023-01-13T06:02:03
2016-07-06T04:13:49
Python
UTF-8
Python
false
false
1,598
py
"""The tests for the telegram.notify platform.""" from os import path from homeassistant import config as hass_config import homeassistant.components.notify as notify from homeassistant.components.telegram import DOMAIN from homeassistant.const import SERVICE_RELOAD from homeassistant.setup import async_setup_component from tests.async_mock import patch async def test_reload_notify(hass): """Verify we can reload the notify service.""" with patch("homeassistant.components.telegram_bot.async_setup", return_value=True): assert await async_setup_component( hass, notify.DOMAIN, { notify.DOMAIN: [ { "name": DOMAIN, "platform": DOMAIN, "chat_id": 1, }, ] }, ) await hass.async_block_till_done() assert hass.services.has_service(notify.DOMAIN, DOMAIN) yaml_path = path.join( _get_fixtures_base_path(), "fixtures", "telegram/configuration.yaml", ) with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path): await hass.services.async_call( DOMAIN, SERVICE_RELOAD, {}, blocking=True, ) await hass.async_block_till_done() assert not hass.services.has_service(notify.DOMAIN, DOMAIN) assert hass.services.has_service(notify.DOMAIN, "telegram_reloaded") def _get_fixtures_base_path(): return path.dirname(path.dirname(path.dirname(__file__)))
3169f03ad1a82380f124de333e6a15857ecf1ae8
4fc21c3f8dca563ce8fe0975b5d60f68d882768d
/GodwillOnyewuchi/Phase 1/Python Basic 2/day 12 task/task10.py
a4924e40fbc8159a266fbfd0579729acab934db6
[ "MIT" ]
permissive
Uche-Clare/python-challenge-solutions
17e53dbedbff2f33e242cf8011696b3059cd96e9
49ede6204ee0a82d5507a19fbc7590a1ae10f058
refs/heads/master
2022-11-13T15:06:52.846937
2020-07-10T20:59:37
2020-07-10T20:59:37
266,404,840
1
0
MIT
2020-05-23T19:24:56
2020-05-23T19:24:55
null
UTF-8
Python
false
false
290
py
# Python program to get numbers divisible by fifteen from a list using an anonymous function
def divisibleby15(lists):
    newList = []
    for i in lists:
        if i % 15 == 0:
            newList.append(i)
    return newList

print(divisibleby15([23, 56, 12, 15, 45, 23, 70, 678, 90]))
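# The header mentions an anonymous function, but the loop above does not use
# one; the lambda-based equivalent the task seems to ask for would be:
divisibleby15_lambda = lambda lists: list(filter(lambda i: i % 15 == 0, lists))
print(divisibleby15_lambda([23, 56, 12, 15, 45, 23, 70, 678, 90]))  # [15, 45, 90]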
ce6667dc95fdefc8be193b41ae44902d4600a89a
7a9c01f7029e74c697100e244d26c72d0e283d47
/models/amenity.py
9adbf8d9f5418e8b43eeb584cccd1acbde12617c
[]
no_license
toyugo/holbertonschool-AirBnB_clone
63321296ecee98b1a0cda39c7b155cc2ea5ececb
5edaeafb6516130f2027b505fe8b168f6f9de174
refs/heads/main
2023-03-21T06:32:18.728878
2021-03-04T13:08:56
2021-03-04T13:08:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
168
py
#!/usr/bin/python3
""" Module Amenity """
from models.base_model import BaseModel


class Amenity(BaseModel):
    """ Class Amenity based on BaseModel """
    name = ""
c4b2fcaa8f6499cdca69575ead3662b305b1ccd5
4ed33dba672aa6aaef42698ef8437c872b078d37
/backend/home/migrations/0001_load_initial_data.py
e78b5b69ad3761f691200103468335142fc62434
[]
no_license
crowdbotics-apps/flat-heart-27928
aecb93c66e39e94e01cef7fe9506effe994cde18
ce209de8910b1e9f006814b58a05aed1eeada32d
refs/heads/master
2023-05-26T14:51:41.045373
2021-06-11T20:01:34
2021-06-11T20:01:34
376,130,678
0
0
null
null
null
null
UTF-8
Python
false
false
538
py
from django.db import migrations


def create_site(apps, schema_editor):
    Site = apps.get_model("sites", "Site")
    custom_domain = "flat-heart-27928.botics.co"

    site_params = {
        "name": "Flat Heart",
    }
    if custom_domain:
        site_params["domain"] = custom_domain

    Site.objects.update_or_create(defaults=site_params, id=1)


class Migration(migrations.Migration):

    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_site),
    ]
6bc05f1c24acd83be18b9337a531c43c42f39d63
6e928e1651713f945c980bca6d6c02ac5dce249a
/task1/5.py
64b92c59d071daed1a062f5bbc9c61742d9564d9
[]
no_license
Akzhan12/pp2
97334158b442383df32583ee6c0b9cab92a3ef45
56e33fd9119955ea8349172bf3f2cc5fbd814142
refs/heads/main
2023-06-28T08:30:11.068397
2021-07-29T08:34:43
2021-07-29T08:34:43
337,359,826
1
0
null
null
null
null
UTF-8
Python
false
false
211
py
a = list(map(int, input().split()))
n = int(input()) % len(a)
if n < 0:
    n = abs(n)
    print(*a[n:], end=" ")
    print(*a[0:n])
else:
    n = abs(n)
    print(*a[-n:], end=" ")
    print(*a[0:-n])
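# An equivalent rotation using collections.deque (a sketch, not the submitted
# code); rotate(n) moves elements n positions to the right for positive n:
from collections import deque

d = deque([1, 2, 3, 4, 5])
d.rotate(2)
print(*d)  # 4 5 1 2 3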
e5ab44dc776222c231274dd703bcd5aebdb8b110
f207586e34b37b13ee6012ea08f174e302fa0078
/mimo/util/decorate.py
cf41979d6dfcac6b024ecd468df4e0901d8627e7
[ "MIT" ]
permissive
pnickl/mimo
92b7858108e077ff43082f15f635d1205120b143
81c4bbd2594e2136445009eae752ab8a1602a1cf
refs/heads/master
2022-12-24T02:10:34.838878
2020-08-04T19:24:21
2020-08-04T19:24:21
302,394,694
2
0
MIT
2020-10-08T16:07:26
2020-10-08T16:07:25
null
UTF-8
Python
false
false
1,796
py
def pass_obs_arg(f):
    def wrapper(self, obs=None, **kwargs):
        if obs is None:
            assert self.has_data()
            obs = [_obs for _obs in self.obs]
        else:
            obs = obs if isinstance(obs, list) else [obs]

        return f(self, obs, **kwargs)
    return wrapper


def pass_obs_and_labels_arg(f):
    def wrapper(self, obs=None, labels=None, **kwargs):
        if obs is None or labels is None:
            assert self.has_data()
            obs = [_obs for _obs in self.obs]
            labels = self.labels
        else:
            obs = obs if isinstance(obs, list) else [obs]
            labels = [self.gating.likelihood.rvs(len(_obs)) for _obs in obs]\
                if labels is None else labels

        return f(self, obs, labels, **kwargs)
    return wrapper


def pass_target_and_input_arg(f):
    def wrapper(self, y=None, x=None, **kwargs):
        if y is None or x is None:
            assert self.has_data()
            y = [_y for _y in self.target]
            x = [_x for _x in self.input]
        else:
            y = y if isinstance(y, list) else [y]
            x = x if isinstance(x, list) else [x]

        return f(self, y, x, **kwargs)
    return wrapper


def pass_target_input_and_labels_arg(f):
    def wrapper(self, y=None, x=None, z=None, **kwargs):
        if y is None or x is None and z is None:
            assert self.has_data()
            y = [_y for _y in self.target]
            x = [_x for _x in self.input]
            z = self.labels
        else:
            y = y if isinstance(y, list) else [y]
            x = x if isinstance(x, list) else [x]
            z = [self.gating.likelihood.rvs(len(_y)) for _y in y]\
                if z is None else z

        return f(self, y, x, z, **kwargs)
    return wrapper
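# A hypothetical usage sketch (the Mixture class and its attributes are my
# assumptions; the real consumers live elsewhere in the mimo package):
class Mixture:
    def __init__(self):
        self.obs = [[1.0, 2.0], [3.0]]

    def has_data(self):
        return len(self.obs) > 0

    @pass_obs_arg
    def mean_obs(self, obs):
        # thanks to the decorator, obs is always a list of observation sets here
        return [sum(_obs) / len(_obs) for _obs in obs]

print(Mixture().mean_obs())  # no argument given, falls back to self.obs -> [1.5, 3.0]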
b9c5ca1798fcaffb1707909fd79abe2418769bda
04ac33f68827aeef7d5bc441d10979143828ef1a
/contactSpider.py
037682c5a672fc9a935a9454eaef442e24e5a338
[]
no_license
samshultz/realtor_agent_spider
a06e99af15fc78902c5f44fcb91dd6d55490b14f
4550301a9e4733ad19bd6fd904e079037847bbf7
refs/heads/master
2021-07-05T04:28:17.703484
2017-09-30T02:22:34
2017-09-30T02:22:34
105,333,052
0
0
null
null
null
null
UTF-8
Python
false
false
1,933
py
import scrapy

class ContactSpider(scrapy.Spider):
    # name of the spider
    name = "contacts"
    # the url to start scraping from
    start_urls = [
        "https://www.realtor.com/realestateagents/Los-Angeles_CA"
    ]

    def parse(self, response):
        # check the page for the name of the agent...
        for href in response.css("div[itemprop=name] a::attr(href)"):
            # ...click on it and call the parse_agent method on each one
            yield response.follow(href, self.parse_agent)

        # follow pagination links...
        # for href in response.css("a.next::attr(href)"):
        #     #...repeat this method (parse method) on each page
        #     yield response.follow(href, self.parse)

    def parse_agent(self, response):
        # get the element containing the address info and extract the text
        address = response.css("#modalcontactInfo span[itemprop=streetAddress]::text").extract_first()
        # check if the address is available...
        if address is not None:
            # ... if it is, get the city, state and zipcode from it (this info
            # is contained in the last three info in the address)
            city, state, zipcode = address.split(",")[-3:]
            # separate the address
            addr = ''.join(address.split(",")[:-3])
        else:
            # if the address is not available
            # set the city, state, addr and zipcode to empty string
            city, state, zipcode = "", "", ""
            addr = ""
        # return a dictionary of the extracted info
        yield {
            "name": response.css("#modalcontactInfo p.modal-agent-name::text").extract_first().split(",")[0],
            "location": response.css("#modalcontactInfo p.modal-agent-location::text").extract_first().strip(),
            "address": addr,
            "city": city,
            "state": state,
            "zipcode": zipcode,
        }
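# One way to run the spider from plain Python (an illustrative sketch; a
# Scrapy project would normally use the `scrapy crawl contacts` CLI instead;
# the FEEDS setting requires a recent Scrapy and the output file name is an
# assumption):
from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(settings={"FEEDS": {"agents.json": {"format": "json"}}})
process.crawl(ContactSpider)
process.start()  # blocks until the crawl finishes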
43078cfccfee9f2bbde2f0af3de46006b564a128
0725ed7ab6be91dfc0b16fef12a8871c08917465
/tree/is_bst.py
26ed670c86a2703f7550da0fa62852b62ed81d7b
[]
no_license
siddhism/leetcode
8cb194156893fd6e9681ef50c84f0355d09e9026
877933424e6d2c590d6ac53db18bee951a3d9de4
refs/heads/master
2023-03-28T08:14:12.927995
2021-03-24T10:46:20
2021-03-24T10:46:20
212,151,205
0
0
null
null
null
null
UTF-8
Python
false
false
716
py
# A binary tree node
import sys


class Node:
    # Constructor to create a new node
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def is_bst(node, min_limit, max_limit):
    if not node:
        return True
    if not (min_limit < node.data < max_limit):
        return False
    l_path = is_bst(node.left, min_limit, node.data)
    r_path = is_bst(node.right, node.data, max_limit)
    return l_path and r_path


# Driver program to test above function
# (ported to Python 3: sys.maxint became sys.maxsize, print is a function)
root = Node(4)
root.left = Node(2)
root.right = Node(5)
root.left.left = Node(1)
root.left.right = Node(3)

if is_bst(root, -sys.maxsize, sys.maxsize):
    print("Is BST")
else:
    print("Not a BST")
3987405f70f48d91c8ac18c9912585cb8b9c44d3
5ba345bc16519d892fb533451eeface7c76a7d48
/Classification/Logistic-Regression/LogisticRegression.py
33ac92c5f131dde88d715d277e16cca84ae2164e
[]
no_license
sayands/machine-learning-projects
337fd2aeb63814b6c47c9b2597bfe1ce4399a1f1
8e516c0ac3a96a4058d063b86559ded9be654c35
refs/heads/master
2021-05-06T16:26:37.008873
2018-08-02T20:27:20
2018-08-02T20:27:20
113,749,745
1
2
null
null
null
null
UTF-8
Python
false
false
2,653
py
# Logistic Regression

# Importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, 2:4].values
Y = dataset.iloc[:, 4].values

# Splitting the dataset into the Training Set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=0)

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)

# Fitting Logistic Regression To The Training Set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, Y_train)

# Predicting The Test Set Results
y_pred = classifier.predict(X_test)

# Making The Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, y_pred)

# Visualising The Training Set Results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, Y_train
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()

# Visualising The Test Set Results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, Y_test
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
fc5d1edb3647e18a663c8c43b897809c51abbf89
4c2a391f2f4d7361f2c7111b6d63edf67056f327
/model/oauth.py
4c650a7683108b8d5c4e420c7b90b52c00c2172a
[]
no_license
niyoufa/tnd_server
6d69db32ceb5a6a14417b3e8b0f021fdc0e7e79c
59c9ac6769773573685be215b4674d77545fe127
refs/heads/master
2020-06-23T15:43:28.891619
2016-08-26T03:44:01
2016-08-26T03:44:01
66,613,944
0
0
null
null
null
null
UTF-8
Python
false
false
408
py
# -*- coding: utf-8 -*-
"""
    author : youfaNi
    date : 2016-07-13
"""
from bson.son import SON

import renren.model.model as model
import renren.libs.mongolib as mongo
import renren.consts as consts
import renren.libs.utils as utils


class OauthModel(model.BaseModel, model.Singleton):
    __name = "renren.oauth_clients"

    def __init__(self):
        model.BaseModel.__init__(self, OauthModel.__name)
ea71dcf4271de4375a1cd100421e6cb04179b2a8
ae1d96991a256b905ab8793ebc6063a9628cef02
/muddery/combat/normal_combat_handler.py
f572690ce4f9a5ce3b3ed3411737fa890fdf193b
[ "BSD-3-Clause" ]
permissive
FWiner/muddery
bd2028e431dbeae16d6db9806cd2e9a7f4c5f22d
f6daa5fab6007e7c830e301718154fbc7b78b2bb
refs/heads/master
2020-07-31T23:02:54.165362
2019-09-04T13:29:59
2019-09-04T13:29:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,083
py
""" Combat handler. """ from django.conf import settings from muddery.utils import defines from muddery.utils.builder import delete_object from muddery.combat.base_combat_handler import BaseCombatHandler class NormalCombatHandler(BaseCombatHandler): """ This implements the normal combat handler. """ def start_combat(self): """ Start a combat, make all NPCs to cast skills automatically. """ super(NormalCombatHandler, self).start_combat() for character in self.characters.values(): if not character.account: # Monsters auto cast skills character.start_auto_combat_skill() def at_server_shutdown(self): """ This hook is called whenever the server is shutting down fully (i.e. not for a restart). """ for character in self.characters.values(): # Stop auto cast skills character.stop_auto_combat_skill() super(NormalCombatHandler, self).at_server_shutdown() def show_combat(self, character): """ Show combat information to a character. Args: character: (object) character Returns: None """ super(NormalCombatHandler, self).show_combat(character) # send messages in order character.msg({"combat_commands": character.get_combat_commands()}) def finish(self): """ Finish a combat. Send results to players, and kill all failed characters. """ for character in self.characters.values(): # Stop auto cast skills character.stop_auto_combat_skill() super(NormalCombatHandler, self).finish() def set_combat_results(self, winners, losers): """ Called when the character wins the combat. Args: winners: (List) all combat winners. losers: (List) all combat losers. Returns: None """ super(NormalCombatHandler, self).set_combat_results(winners, losers) # add exp to winners # get total exp exp = 0 for loser in losers: exp += loser.provide_exp(loser) if exp: # give experience to the winner for character in winners: character.add_exp(exp, combat=True) for character in winners: if character.is_typeclass(settings.BASE_PLAYER_CHARACTER_TYPECLASS): # get object list loots = None for loser in losers: obj_list = loser.loot_handler.get_obj_list(character) if obj_list: if not loots: loots = obj_list else: loots.extend(obj_list) # give objects to winner if loots: character.receive_objects(loots, combat=True) # call quest handler for loser in losers: character.quest_handler.at_objective(defines.OBJECTIVE_KILL, loser.get_data_key()) # losers are killed. for character in losers: character.die(winners) def _cleanup_character(self, character): """ Remove character from handler and clean it of the back-reference and cmdset """ super(NormalCombatHandler, self)._cleanup_character(character) if not character.is_typeclass(settings.BASE_PLAYER_CHARACTER_TYPECLASS): if character.is_temp: # notify its location location = character.location delete_object(character.dbref) if location: for content in location.contents: if content.has_account: content.show_location() else: if character.is_alive(): # Recover all hp. character.db.hp = character.max_hp
9aefb0ae5bd605c4dae7ca200d14f1508eb9fb11
f0755c0ca52a0a278d75b76ee5d9b547d9668c0e
/atcoder.jp/abc084/abc084_d/Main.py
672f72253da43a227e962b8055a0caa9001017ec
[]
no_license
nasama/procon
7b70c9a67732d7d92775c40535fd54c0a5e91e25
cd012065162650b8a5250a30a7acb1c853955b90
refs/heads/master
2022-07-28T12:37:21.113636
2020-05-19T14:11:30
2020-05-19T14:11:30
263,695,345
0
0
null
null
null
null
UTF-8
Python
false
false
567
py
def primes(n):
    is_prime = [1]*(n+1)
    is_prime[0] = 0
    is_prime[1] = 0
    for i in range(2, int(n**0.5) + 1):
        if not is_prime[i]:
            continue
        for j in range(i*2, n+1, i):
            is_prime[j] = 0
    return is_prime

max = 100001
prime = primes(max)
a = [0]*max
for i in range(max):
    if i % 2 == 0:
        continue
    if prime[i] and prime[(i+1)//2]:
        a[i] = 1
s = [0]*(max+1)
for i in range(max):
    s[i+1] = s[i] + a[i]
Q = int(input())
for i in range(Q):
    l, r = map(int, input().split())
    print(s[r+1]-s[l])
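Since the solution reads its queries from stdin, a quick offline check of the sieve plus prefix-sum idea is useful; the small bound below is chosen just for illustration (an odd x counts when both x and (x + 1) // 2 are prime):

# Offline sanity check of the counting rule, no stdin needed (toy bound).
is_prime = primes(20)
likes = [x for x in range(1, 20, 2) if is_prime[x] and is_prime[(x + 1) // 2]]
print(likes)  # [3, 5, 13]; e.g. 13 and (13 + 1) // 2 == 7 are both prime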
fb95a962370d7b4bb6c6d781611394a5ad69f45a
e3fe234510d19c120d56f9a2876b7d508d306212
/17tensorflow/5_lm/ngram/ngram.py
6146628f947c8ebec2603563c38c067b7d61b32d
[ "Apache-2.0" ]
permissive
KEVINYZY/python-tutorial
78b348fb2fa2eb1c8c55d016affb6a9534332997
ae43536908eb8af56c34865f52a6e8644edc4fa3
refs/heads/master
2020-03-30T02:11:03.394073
2019-12-03T00:52:10
2019-12-03T00:52:10
150,617,875
0
0
Apache-2.0
2018-09-27T16:39:29
2018-09-27T16:39:28
null
UTF-8
Python
false
false
3,057
py
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Date: 17/11/29
# Brief:
"""Read a corpus and build an n-gram language model."""
from collections import Counter, defaultdict
from pprint import pprint
from random import random

import jieba

N = 2  # order of the n-gram model
START = '$$'  # sentence-start token
BREAK = '。!?'  # symbols that end a sentence
IGNORE = '\n “”"《》〈〉()*'  # symbols to ignore


def process_segs(segments):
    """Process segments (an iterator) and return a list.

    Rules:
    - ignore newlines, spaces, quotes, title marks, etc.
    - append START tokens after each sentence-break symbol
    """
    results = [START for i in range(N - 1)]
    for seg in segments:
        if seg in IGNORE:
            continue
        else:
            results.append(seg)
            if seg in BREAK:
                results.extend([START for i in range(N - 1)])
    return results


def count_ngram(segments):
    """Count N-gram occurrences."""
    dct = defaultdict(Counter)
    for i in range(N - 1, len(segments)):
        context = tuple(segments[i - N + 1:i])
        word = segments[i]
        dct[context][word] += 1
    return dct


def to_prob(dct):
    """Convert a count dict to a probability dict."""
    prob_dct = dct.copy()
    for context, count in prob_dct.items():
        total = sum(count.values())
        for word in count:
            count[word] /= total  # works in Python 3
    return prob_dct


def generate_word(prob_dct, context):
    """Randomly generate a word from the conditional probabilities for context."""
    r = random()
    psum = 0
    for word, prob in prob_dct[context].items():
        psum += prob
        if psum > r:
            return word
    return START  # fall back to START (guards against floating-point rounding)


def generate_sentences(m, prob_dct):
    """Generate m sentences."""
    sentences = []
    text = ''
    context = tuple(START for i in range(N - 1))
    i = 0
    while i < m:
        word = generate_word(prob_dct, context)
        text = text + word
        context = tuple((list(context) + [word])[1:])
        if word in BREAK:
            sentences.append(text)
            text = ''
            context = tuple(START for i in range(N - 1))
            i += 1
    return sentences


def main():
    global N  # the helpers above read the module-level N
    for N in range(2, 6):
        print('\n*** reading corpus ***')
        with open('../../../data/tianlongbabu.txt', encoding="utf8") as f:
            corpus = f.read()
        print('*** cutting corpus ***')
        raw_segments = jieba.cut(corpus)
        print('*** processing segments ***')
        segments = process_segs(raw_segments)
        print('*** generating {}-gram count dict ***'.format(N))
        dct = count_ngram(segments)
        print('*** generating {}-gram probability dict ***'.format(N))
        prob_dct = to_prob(dct)
        # pprint(prob_dct)
        import pickle
        with open('prob_dct_{}gram.pkl'.format(N), 'wb') as pf:
            pickle.dump(prob_dct, pf)  # pickle.dump needs a file object
        print('*** generating sentences ***')
        with open('generated_{}gram.txt'.format(N), 'w', encoding="utf8") as f:
            f.write('\n'.join(generate_sentences(20, prob_dct)))


if __name__ == "__main__":
    main()
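The model normally needs a large corpus, but the pipeline can be smoke-tested on a tiny hand-made token list; the toy tokens below are invented for illustration and bypass jieba entirely:

# Minimal smoke test of the pipeline above (toy tokens, module-level N == 2).
segments = process_segs(['我', '来', '了', '。', '你', '来', '了', '。'])
prob_dct = to_prob(count_ngram(segments))
print(generate_sentences(2, prob_dct))  # two short generated "sentences"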
5630da04cc30441eabf72f420f1a24217fbaba01
e2423781704811bf0a0ecc07f9cb29d0a044ac48
/tensorflow_datasets/image/bccd/dummy_data_generation.py
bddde3b24d939e2a794def3d52ba9eee64bd8de6
[ "Apache-2.0" ]
permissive
mbbessa/datasets
af2506a8cf5c46c33143d6e0266ba50d8b4c3fcc
2a7e8e793197637948ea0e0be4aa02a6aa2f7f55
refs/heads/master
2021-11-30T22:28:55.825453
2021-11-19T20:49:49
2021-11-19T20:52:42
171,528,015
0
0
Apache-2.0
2019-02-19T18:34:26
2019-02-19T18:34:26
null
UTF-8
Python
false
false
5,387
py
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Generate bccd data."""

import os
import random

from absl import app
from absl import flags
import tensorflow as tf
from tensorflow_datasets.core.utils import py_utils
from tensorflow_datasets.image.bccd import bccd
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.testing import fake_data_utils

# In TF 2.0, eager execution is enabled by default
tf.compat.v1.disable_eager_execution()

flags.DEFINE_string("tfds_dir", py_utils.tfds_dir(),
                    "Path to tensorflow_datasets directory")
FLAGS = flags.FLAGS

MIN_OBJECT_HEIGHT_WIDTH = 100
MAX_OBJECT_HEIGHT_WIDTH = 400
MIN_NUM_OBJECTS = 1
MAX_NUM_OBJECTS = 3


def _output_dir():
  return os.path.join(FLAGS.tfds_dir, "image", "bccd", "dummy_data")


def _write_text_file(filepath, content):
  """Write a text file given its content."""
  dirname = os.path.dirname(filepath)
  if not tf.io.gfile.exists(dirname):
    tf.io.gfile.makedirs(dirname)
  with tf.io.gfile.GFile(filepath, "w") as f:
    f.write(content)


def _generate_jpeg(example_id, height, width):
  """Generate a fake jpeg image for the given example id."""
  jpeg = fake_data_utils.get_random_jpeg(height=height, width=width)
  filepath = os.path.join(
      _output_dir(),
      "BCCD_Dataset-1.0/BCCD/JPEGImages/BloodImage_{:05d}.jpg".format(
          example_id))
  dirname = os.path.dirname(filepath)
  if not tf.io.gfile.exists(dirname):
    tf.io.gfile.makedirs(dirname)
  tf.io.gfile.copy(jpeg, filepath, overwrite=True)


def _generate_annotation(example_id, height, width):
  """Generate a fake annotation XML for the given example id."""
  # pylint: disable=protected-access
  label_names = tfds.features.ClassLabel(names=bccd._CLASS_LABELS).names  # pytype: disable=module-attr
  # pylint: enable=protected-access
  annotation = "<annotation>\n"
  annotation += "<folder>JPEGImages</folder>\n"
  annotation += "<filename>%d.jpg</filename>\n" % example_id
  annotation += "<path>/home/pi/detection_dataset/JPEGImages/%d.jpg</path>" % example_id
  annotation += "<source>\n"
  annotation += "<database>Unknown</database>\n"
  annotation += "</source>"
  annotation += "<size>\n"
  annotation += "<width>%d</width>\n" % width
  annotation += "<height>%d</height>\n" % height
  annotation += "</size>\n"
  for i in range(random.randint(MIN_NUM_OBJECTS, MAX_NUM_OBJECTS)):
    annotation += "<object>\n"
    annotation += "  <name>%s</name>\n" % random.choice(label_names)
    annotation += "  <pose>Unspecified</pose>\n"
    annotation += "  <truncated>0</truncated>\n"
    if i > 0:
      annotation += "  <difficult>%s</difficult>\n" % random.randint(0, 1)
    else:
      annotation += "  <difficult>0</difficult>\n"
    obj_w = random.randint(MIN_OBJECT_HEIGHT_WIDTH, MAX_OBJECT_HEIGHT_WIDTH)
    obj_h = random.randint(MIN_OBJECT_HEIGHT_WIDTH, MAX_OBJECT_HEIGHT_WIDTH)
    obj_x = random.randint(0, width - obj_w)
    obj_y = random.randint(0, height - obj_h)
    annotation += "  <bndbox>\n"
    annotation += "    <xmin>%d</xmin>\n" % obj_x
    annotation += "    <ymin>%d</ymin>\n" % obj_y
    annotation += "    <xmax>%d</xmax>\n" % (obj_x + obj_w - 1)
    annotation += "    <ymax>%d</ymax>\n" % (obj_y + obj_h - 1)
    annotation += "  </bndbox>\n"
    annotation += "</object>\n"
  annotation += "</annotation>\n"

  # Add annotation XML to the tar file.
  filepath = os.path.join(
      _output_dir(),
      "BCCD_Dataset-1.0/BCCD/Annotations/BloodImage_{:05d}.xml".format(
          example_id))
  _write_text_file(filepath, annotation)


def _generate_data_for_set(set_name, example_start, num_examples):
  """Generate different data examples for the train, validation or test sets."""
  # Generate JPEG and XML files of each example.
  for example_id in range(example_start, example_start + num_examples):
    _generate_jpeg(example_id, 480, 640)
    _generate_annotation(example_id, 480, 640)

  # Add all example ids to the TXT file with all examples in the set.
  filepath = os.path.join(
      _output_dir(),
      "BCCD_Dataset-1.0/BCCD/ImageSets/Main/%s.txt" % set_name)
  _write_text_file(
      filepath, "".join([
          "BloodImage_{:05d}\n".format(example_id)
          for example_id in range(example_start, example_start + num_examples)
      ]))


def _generate_trainval_archive():
  """Generate train/val archive."""
  _generate_data_for_set("train", example_start=0, num_examples=2)
  _generate_data_for_set("val", example_start=2, num_examples=1)


def _generate_test_archive():
  """Generate test archive."""
  _generate_data_for_set("test", example_start=3, num_examples=2)


def main(argv):
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")
  _generate_trainval_archive()
  _generate_test_archive()


if __name__ == "__main__":
  app.run(main)
a1cc5cf11e5624b2b3f89755554f97571fd1a25b
f759188e90610e08b4d85358abeaf27f2796964e
/tinyos-main/apps/PIR_Sensor/util/Listener.py
464d97ddd4475819140e31d39a6f13222a0dc46e
[]
no_license
SoftwareDefinedBuildings/KetiMotes
5555626231edb1cb76cb96bb4134a52d1d88bbb1
b6dfea4b7d3dd384dd78a91ce62e7990cd337009
refs/heads/master
2020-04-06T23:55:42.151717
2014-09-11T18:25:17
2014-09-11T18:25:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,595
py
import socket
import UdpReport
import re
import sys
import time
import threading

port = 7000
stats = {}

class PrintStats(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.daemon = True

    def run(self):
        while True:
            self.print_stats()
            time.sleep(3)

    def print_stats(self):
        global stats
        print "-" * 40
        for k, v in stats.iteritems():
            print "%s: %i/%i (%0.2f ago) (%0.2f%%)" % (k, v[0], v[3] - v[2] + 1,
                                                       time.time() - v[1],
                                                       100 * float(v[0]) / (v[3] - v[2] + 1))
        print "%i total" % len(stats)
        print "-" * 40

if __name__ == '__main__':
    s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    s.bind(('', port))

    ps = PrintStats()
    ps.start()

    while True:
        data, addr = s.recvfrom(1024)
        if (len(data) > 0):
            rpt = UdpReport.UdpReport(data=data, data_length=len(data))

            print rpt.get_seqno()
            print rpt.get_interval()
            print rpt.get_readings()
            print addr[0]

            if not addr[0] in stats:
                stats[addr[0]] = (0, time.time(), rpt.get_seqno(), rpt.get_seqno())
            cur = stats[addr[0]]
            stats[addr[0]] = (cur[0] + 1, time.time(), cur[2], rpt.get_seqno())
5f68224654afb98c99125e28a341ed8dd9de664a
316c473d020f514ae81b7485b10f6556cf914fc0
/scrapycrawlspidertest/scrapycrawlspidertest/spiders/universal.py
38f44ee1edb89a378c243113f5a699a7ccc43884
[ "Apache-2.0" ]
permissive
silianpan/seal-spider-demo
ca96b12d4b6fff8fe57f8e7822b7c0eb616fc7f3
7bdb77465a10a146c4cea8ad5d9ac589c16edd53
refs/heads/master
2023-06-20T03:47:04.572721
2023-05-24T06:27:13
2023-05-24T06:27:13
189,963,452
1
1
Apache-2.0
2022-12-08T03:24:54
2019-06-03T08:15:56
Python
UTF-8
Python
false
false
1,855
py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from scrapycrawlspidertest.utils import get_config
from scrapycrawlspidertest.items import *
from scrapycrawlspidertest.wraps import *
from scrapycrawlspidertest import urls


class UniversalSpider(CrawlSpider):
    name = 'universal'

    def __init__(self, name, *args, **kwargs):
        config = get_config(name)
        self.config = config
        self.rules = eval(config.get('rules'))
        start_urls = config.get('start_urls')
        if start_urls:
            if start_urls.get('type') == 'static':
                self.start_urls = start_urls.get('value')
            elif start_urls.get('type') == 'dynamic':
                self.start_urls = list(eval('urls.' + start_urls.get('method'))(*start_urls.get('args', [])))
        self.allowed_domains = config.get('allowed_domains')
        super(UniversalSpider, self).__init__(*args, **kwargs)

    def parse_item(self, response):
        # get the item config
        item = self.config.get('item')
        if item:
            data = eval(item.get('class') + '()')
            # read the attribute config dynamically
            for key, value in item.get('attrs').items():
                data[key] = response
                for process in value:
                    type = process.get('type', 'chain')
                    if type == 'chain':
                        # call methods and attributes dynamically
                        if process.get('method'):
                            data[key] = getattr(data[key], process.get('method'))(*process.get('args', []))
                    elif type == 'wrap':
                        args = [data[key]] + process.get('args', [])
                        data[key] = eval(process.get('method'))(*args)
            yield data
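For reference, here is a hypothetical config of the shape this spider consumes; the domain, XPaths, and item class are all invented for illustration (real configs come from get_config):

# Hypothetical config illustrating the schema UniversalSpider expects.
example_config = {
    'allowed_domains': ['example.com'],
    'start_urls': {'type': 'static', 'value': ['https://example.com/news']},
    # eval'd in a scope where Rule and LinkExtractor are imported
    'rules': "(Rule(LinkExtractor(allow='/article/\\d+'), callback='parse_item'),)",
    'item': {
        'class': 'NewsItem',  # assumed to exist in scrapycrawlspidertest.items
        'attrs': {
            'title': [
                {'method': 'xpath', 'args': ['//h1/text()']},   # chained call
                {'method': 'extract_first', 'args': []},        # chained call
            ],
        },
    },
}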
a5d5b5d8d00dd3c8d9faee9c11aeea428df67616
fd94ec2d4cfcdb8aa41c2ecf92504a6502987b54
/scripts/EmuMarker.py
27dad104e3e1fda0c3243804da0f8a2a8f3c2f84
[ "LicenseRef-scancode-glut", "BSD-3-Clause", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "Unlicense", "MIT", "SGI-B-2.0" ]
permissive
greggman/regal
70bccfd935c42f2a532471f84f164b9992886bce
60d5f5f060dcbfa6ff2cdd5bf8823fd5a9cf11db
refs/heads/master
2020-12-30T19:11:25.692166
2012-09-12T14:39:39
2012-09-12T14:39:39
5,432,185
1
0
null
null
null
null
UTF-8
Python
false
false
854
py
#!/usr/bin/python -B

formulae = {
    'Insert': {
        'entries': ['glInsertEventMarkerEXT'],
        'impl': [
            '_context->marker->InsertEventMarker( _context, ${arg0plus} );',
            'RegalAssert(_context->info);',
            'if (!_context->info->gl_ext_debug_marker) return;',
        ],
    },
    'Push': {
        'entries': ['glPushGroupMarkerEXT'],
        'impl': [
            '_context->marker->PushGroupMarker( _context, ${arg0plus} );',
            'RegalAssert(_context->info);',
            'if (!_context->info->gl_ext_debug_marker) return;',
        ],
    },
    'Pop': {
        'entries': ['glPopGroupMarkerEXT'],
        'impl': [
            '_context->marker->PopGroupMarker( _context );',
            'RegalAssert(_context->info);',
            'if (!_context->info->gl_ext_debug_marker) return;',
        ],
    },
}
f4acaf7682a9a1e14d09298963943cca14536cb0
7b38197bb4772724f5e875f9d3b79d61050a072b
/BioSTEAM 1.x.x/biorefineries/cornstover/_plot_spearman.py
0c107226c916f5a7750fac6a89132a83827bf351
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
yalinli2/Bioindustrial-Park
fac6d58d82af56f5081f529c3ee0c65a70fe7bd3
196e2d60ec9bf0466ef804d036c995b89bc72f72
refs/heads/master
2021-09-24T11:24:26.586458
2021-09-09T14:05:33
2021-09-09T14:05:33
232,337,200
2
0
MIT
2021-09-09T14:05:34
2020-01-07T14:04:05
Jupyter Notebook
UTF-8
Python
false
false
2,749
py
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 00:02:57 2019

@author: yoelr
"""
import pandas as pd
from biosteam import colors
from biosteam.evaluation.evaluation_tools import plot_spearman

# %% Plot Spearman correlations

# Replacement parameter labels
replacement_labels = {
    'Stream-Ethanol price': 'Ethanol price',
    'TEA operating days': 'Operating days',
    'Stream-cornstover price': 'Cornstover price',
    'Fermentation-R301 efficiency': 'Fermentation efficiency',
    'Stream-cellulase price': 'Cellulase price',
    'Stream-cornstover flow rate': 'Cornstover flow rate',
    'TEA income tax': 'Income tax',
    'Saccharification and co fermentation-R301 saccharification conversion': 'Saccharification conversion',
    'Saccharification and co fermentation-R301 ethanol conversion': 'Fermentation ethanol conversion',
    'Boiler turbogenerator-BT boiler efficiency': 'Boiler efficiency',
    'Boiler turbogenerator boiler base cost': 'Boiler base cost',
    'Boiler turbogenerator turbogenerator base cost': 'Turbogenerator base cost',
    'Pretreatment reactor system base cost': 'Pretreatment reactor base cost',
    'Power utility price': 'Electricity price',
    'Cooling tower base cost': 'Cooling tower base cost',
    'Waste water system cost waste water system base cost': 'Wastewater treatment base cost',
    'Waste water system cost waste water system exponent': 'Wastewater treatment exponent'}


def replace_label_text(label_text):
    """Replace label text for graph."""
    name, distribution = label_text.split(' [')
    lb, mid, ub = eval('[' + distribution)
    if 'efficiency' in name:
        distribution = f" ({lb:.2f}, {mid:.2f}, {ub:.2f})"
    else:
        distribution = f" ({lb:.3g}, {mid:.3g}, {ub:.3g})"
    pos = name.find(' (')
    if pos != -1:
        units = str(name[pos:]).replace('(', '[').replace(')', ']')
        if units == ' [USD/kg]':
            units = ' [$\mathrm{USD} \cdot \mathrm{kg}^{-1}$]'
        elif units == ' [USD/kWhr]':
            units = ' [$\mathrm{USD} \cdot \mathrm{kWhr}^{-1}$]'
        elif units == ' [kg/hr]':
            units = ' [$\mathrm{kg} \cdot \mathrm{hr}^{-1}$]'
        name = name[:pos]
    else:
        units = ''
    if name in replacement_labels:
        name = replacement_labels[name]
    return name + units + distribution


# Get data
rhos = pd.read_excel('Spearman correlation cornstover.xlsx',
                     header=[0], index_col=0).iloc[:, 0]

# Get only important parameters
rhos = rhos[rhos.abs() > 0.055]

# Plot and fix axis labels
fig, ax = plot_spearman(rhos, top=10, name='MESP')
labels = [item.get_text() for item in ax.get_yticklabels()]
new_labels = [replace_label_text(i) for i in labels]
ax.set_yticklabels(new_labels)
5cc4b052f8af56030d1a18a236cfee198c0e14a0
c7a6f8ed434c86b4cdae9c6144b9dd557e594f78
/ECE364/.PyCharm40/system/python_stubs/348993582/gst/_gst/Date.py
b079f8261188a978cb48855b5781e2227e2dea1e
[]
no_license
ArbalestV/Purdue-Coursework
75d979bbe72106975812b1d46b7d854e16e8e15e
ee7f86145edb41c17aefcd442fa42353a9e1b5d1
refs/heads/master
2020-08-29T05:27:52.342264
2018-04-03T17:59:01
2018-04-03T17:59:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
822
py
# encoding: utf-8
# module gst._gst
# from /usr/lib64/python2.6/site-packages/gst-0.10/gst/_gst.so
# by generator 1.136
# no doc

# imports
import gobject as __gobject
import gobject._gobject as __gobject__gobject
import gst as __gst


class Date(__gobject.GBoxed):
    # no doc
    def __init__(self, *args, **kwargs):  # real signature unknown
        pass

    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    day = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    month = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    year = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    __gtype__ = None  # (!) real value is ''
71f09344d33d23e6d19b7a1d9894d79eb5f34f8d
986236feac0d098977dc3f98b705d68155048233
/0x06-python-classes/100-singly_linked_list.py
1e413ac97d382295ceaf0a64d2ca75f43de9041b
[]
no_license
Noeuclides/holbertonschool-higher_level_programming
1f1ec5731840f39ab988593ace190403f701ee67
fcf0d733b73904a848b5718266a644c4f6452166
refs/heads/master
2020-05-18T03:28:56.901071
2019-10-03T17:30:20
2019-10-03T17:30:20
184,145,627
0
0
null
null
null
null
UTF-8
Python
false
false
352
py
#!/usr/bin/python3


class Node:
    # The original body validated an undefined `size` variable (copied from a
    # different exercise) and its `data` accessors were broken; this repairs
    # the class to validate and expose `data` consistently.
    def __init__(self, data=0):
        if type(data) is not int:
            raise TypeError("data must be an integer")
        elif data < 0:
            raise ValueError("data must be >= 0")
        else:
            self.__data = data

    @property
    def data(self):
        return self.__data

    @data.setter
    def data(self, value):
        if type(value) is not int:
            raise TypeError("data must be an integer")
        self.__data = value
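A quick sanity check of the repaired Node class, with toy values:

# Minimal usage sketch for the Node class above.
n = Node(5)
print(n.data)   # 5
n.data = 7
print(n.data)   # 7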
c2ef80a416cc1c202f00d685ef27f6d11b3faf08
4fed7ad67d3bb7da502acaf347dff542971c1c4c
/app.py
24f400a3d432d02740af9391a5b196df5498a484
[ "MIT" ]
permissive
coolsnake/WebFtp
b62437b895261f3083d3f7d3550b541116b30cef
d76bce2391d393d2eeb92be7700dd49a1663e696
refs/heads/master
2021-04-15T14:05:50.752335
2017-09-25T10:59:50
2017-09-25T10:59:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
818
py
#!/usr/bin/env python3

import tornado.ioloop
import tornado.web

from controllers import index
from controllers import account

settings = {
    'template_path': 'template',
    'static_path': 'static',
    'static_url_prefix': '/static/',
    'cookie_secret': '43809138f51b96f8ac24e79b3a2cb482',
    'login_url': '/login',
    # 'xsrf_cookies': True,
    'debug': True,
    'autoreload': True,
}

application = tornado.web.Application([
    # home page
    (r"/index", index.IndexHandler),
    # admin
    (r"/admin", index.AdminHandle),
    # login
    (r"/login", account.LoginHandler),
    # logout
    (r"/logout", account.LogoutHandler),
    # upload
    (r"/upload", index.UploadFileNginxHandle),
], **settings)

if __name__ == '__main__':
    application.listen(8000)
    tornado.ioloop.IOLoop.instance().start()
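For context, a minimal handler of the shape these routes expect; this is an illustrative stand-in, not the project's actual controllers.index code:

# Hypothetical stand-in for controllers.index.IndexHandler, for illustration only.
import tornado.web

class IndexHandler(tornado.web.RequestHandler):
    def get(self):
        self.render('index.html')  # served from the 'template' dir configured above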
9c26acdd9f243cc659a6ae97ad61d70e3a774709
af3ec207381de315f4cb6dddba727d16d42d6c57
/dialogue-engine/src/programy/spelling/textblob_spelling.py
17dce9132a295389213305638b9ac113ad1c6fc2
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
mcf-yuichi/cotoba-agent-oss
02a5554fe81ce21517f33229101013b6487f5404
ce60833915f484c4cbdc54b4b8222d64be4b6c0d
refs/heads/master
2023-01-12T20:07:34.364188
2020-11-11T00:55:16
2020-11-11T00:55:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,540
py
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ """ Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from textblob import TextBlob from programy.spelling.base import SpellingChecker class TextBlobSpellingChecker(SpellingChecker): def __init__(self, spelling_config=None): SpellingChecker.__init__(self, spelling_config) def correct(self, phrase): blob = TextBlob(phrase) correct_blob = blob.correct() return str(correct_blob)
2146739f9834f0af7d112fc44b3b75d696d80c39
f1a51bb6cb5810a2dfac27cbbe32f5c5761bd8ec
/angrmanagement/data/object_container.py
63b931645b8e1c14d8a5902e8eb52b570ff38979
[ "BSD-2-Clause" ]
permissive
sraboy/angr-management
904848408e9eec6662e16d9b69a0991b0374d3c6
4c4c1df7bce7083547ae38a19709f33dd10b7e22
refs/heads/master
2020-04-30T17:23:24.427321
2019-09-21T09:34:21
2019-09-21T09:34:21
176,977,927
0
1
BSD-2-Clause
2019-03-21T15:52:06
2019-03-21T15:52:06
null
UTF-8
Python
false
false
2,374
py
from ..utils.namegen import NameGenerator


class EventSentinel:
    def __init__(self):
        self.am_subscribers = []

    def am_subscribe(self, listener):
        if listener is not None:
            self.am_subscribers.append(listener)

    def am_unsubscribe(self, listener):
        if listener is not None:
            self.am_subscribers.remove(listener)

    def am_event(self, **kwargs):
        for listener in self.am_subscribers:
            listener(**kwargs)


class ObjectContainer(EventSentinel):
    def __init__(self, obj, name=None, notes=''):
        super(ObjectContainer, self).__init__()
        self._am_obj = None
        self.am_obj = obj
        self.am_name = name if name is not None else NameGenerator.random_name()
        self.am_notes = notes

    # cause events to propagate upward through nested objectcontainers
    @property
    def am_obj(self):
        return self._am_obj

    @am_obj.setter
    def am_obj(self, v):
        if type(self._am_obj) is ObjectContainer:
            self._am_obj.am_unsubscribe(self.__forwarder)
        if type(v) is ObjectContainer:
            v.am_subscribe(self.__forwarder)
        self._am_obj = v

    def am_none(self):
        return self._am_obj is None

    def __forwarder(self, **kwargs):
        kwargs['forwarded'] = True
        self.am_event(**kwargs)

    def __getattr__(self, item):
        if item.startswith('am_') or item.startswith('_am_'):
            return object.__getattribute__(self, item)
        return getattr(self._am_obj, item)

    def __setattr__(self, key, value):
        if key.startswith('am_') or key.startswith('_am_'):
            return object.__setattr__(self, key, value)
        setattr(self._am_obj, key, value)

    def __getitem__(self, item):
        return self._am_obj[item]

    def __setitem__(self, key, value):
        self._am_obj[key] = value

    def __dir__(self):
        return dir(self._am_obj) + list(self.__dict__) + list(EventSentinel.__dict__) + ['am_obj', 'am_full']

    def __iter__(self):
        return iter(self._am_obj)

    def __len__(self):
        return len(self._am_obj)

    def __eq__(self, other):
        return self is other or self._am_obj == other

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return '(container %s)%s' % (self.am_name, repr(self._am_obj))
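A short sketch of the intended usage pattern; the names and values are invented, and passing an explicit name sidesteps NameGenerator:

# Illustrative only: wrap a list, subscribe a listener, fire an event.
c = ObjectContainer([1, 2, 3], name='demo')
c.am_subscribe(lambda **kw: print('changed:', kw))
c.am_event(reason='refresh')   # listener receives reason='refresh'
print(len(c), c[0])            # dunder calls forward to the wrapped list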
a4f282d077acf231c813e0781067964299e282f7
6f50d88145923deba55f5df5f88e872a46504f71
/siteconfig/utils.py
e51be1affa4f0bc68f3bca34d399fc656a2d03cf
[]
no_license
vfxetc/siteconfig
ce85cff95a865a8ab6271f305b70643c364c1952
7124e941cf5068a70f07d0011902af797b74657e
refs/heads/master
2021-09-12T13:00:40.933138
2017-08-04T15:08:42
2017-08-04T15:08:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
314
py
import re


def normalize_key(input_):
    input_ = re.sub(r'[^\w\[\]]+', '_', input_)
    input_ = re.sub(r'^(\w+)', lambda m: m.group(1).upper(), input_)
    return input_


def shell_escape(input_):
    return str(input_).replace('"', '\\"')


def shell_quote(input_):
    return '"%s"' % shell_escape(input_)
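Expected behavior of these helpers on toy inputs:

# Toy inputs; non-word runs collapse to '_' and the leading word is uppercased.
print(normalize_key('foo.bar[0]'))   # FOO_BAR[0]
print(shell_quote('say "hi"'))       # "say \"hi\""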
30054b750cf65f48a5410dc67d6c9fd17cee69f1
53947441840357e3966eda580c6a5de3e0b92613
/blaze/module/qualname.py
65b9d324944ebfcc5fd2d0ce2f7ac4a97f5915c3
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause" ]
permissive
jedbrown/blaze-core
8b9eda6267253e1609a7382197ffdf67a41407f6
b2f0d350f5cb7b802819ca46738bacdbe70db13a
refs/heads/master
2021-01-17T06:24:38.307059
2013-04-04T18:24:36
2013-04-04T18:24:36
9,283,188
2
1
null
null
null
null
UTF-8
Python
false
false
1,381
py
#------------------------------------------------------------------------
# Names
#------------------------------------------------------------------------

class Namespace(object):
    def __init__(self, names):
        self.names = names

    def show(self):
        return '.'.join(self.names)

class QualName(object):
    def __init__(self, namespace, name):
        assert isinstance(namespace, list)
        self.namespace = namespace  # was `self.namespace = name`, which dropped the namespace
        self.name = name

    def isprim(self):
        return self.namespace == ['Prims']

    def isqual(self):
        return len(self.namespace) > 1

    def show(self):
        return '.'.join(self.namespace + [self.name])

    def __str__(self):
        return self.show()

#------------------------------------------------------------------------
# Module
#------------------------------------------------------------------------

class Module(object):
    def __init__(self, name):
        self.name = name

    def alias(self):
        pass

    def expose(self, sym, sig):
        pass

#------------------------------------------------------------------------
# Function References
#------------------------------------------------------------------------

# string -> name
# Reference to a function name
def name(s):
    pass

# name -> term
# Reference to a function name
def ref(n):
    pass

# string -> term
def fn(s):
    pass
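With the namespace assignment fixed, QualName behaves as its methods suggest:

# Quick check of the repaired QualName (toy values).
q = QualName(['Prims'], 'add')
print(q.show())    # Prims.add
print(q.isprim())  # True
print(q.isqual())  # False (namespace has length 1)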
b1b10f74c7b2b141fab2f67520ef2bafb047a1f3
051d25888b6a36e50714fa5940f6a31ee951ce77
/gentb_website/tb_website/apps/dropbox_helper/dropbox_util.py
f43f6ff658915949644e878347d6b70ddd524912
[ "MIT" ]
permissive
cchoirat/gentb-site
d0d627ffc160c53b61d92dc8f02a11f930a2b09a
24ebce58cd5f5e0a2f1449e2f14b1f75b592f28f
refs/heads/master
2021-01-21T02:20:55.909012
2015-11-25T18:27:23
2015-11-25T18:27:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,476
py
from os.path import join, isdir
import os

from apps.dropbox_helper.dropbox_retriever import DropboxRetriever
from apps.dropbox_helper.models import DropboxRetrievalLog

from django.conf import settings


def get_dropbox_metadata(predict_dataset):
    """
    Wrap the DropboxRetriever function

    - (True, DropboxRetriever object)
    - (False, error message string)
    """
    if predict_dataset is None:
        return (False, "The dataset was not found.")

    # Initialize
    #
    dr = DropboxRetriever(predict_dataset.dropbox_url,
                          destination_dir=predict_dataset.file_directory,
                          file_patterns=predict_dataset.get_file_patterns())  # was `self.predict_dataset`, but there is no `self` here
    db_log = DropboxRetrievalLog(dataset=predict_dataset)

    if dr.err_found:
        db_log.file_metadata_err_msg = dr.err_msg
        db_log.save()
        return (False, dr.err_msg)

    # Get the metadata
    #
    if not dr.step1_retrieve_metadata():
        db_log.file_metadata_err_msg = dr.err_msg
        db_log.save()
        return (False, dr.err_msg)

    # Does it have what we want?
    #
    if not dr.step2_check_file_matches():
        db_log.file_metadata_err_msg = dr.err_msg
        db_log.save()
        return (False, dr.err_msg)

    # Yes!
    db_log.file_metadata = dr.dropbox_link_metadata
    db_log.selected_files = dr.matching_files_metadata
    db_log.save()

    return (True, dr)


def get_dropbox_metadata_from_link(dropbox_link, file_patterns=None):
    """
    Wrap the DropboxRetriever function

    - (True, DropboxRetriever object)
    - (False, error message string)
    """
    if dropbox_link is None:
        return (False, "The dataset was not found.")

    # This directory doesn't actually get used
    #
    tmp_dir = join(settings.TB_SHARED_DATAFILE_DIRECTORY, 'tmp')
    if not isdir(tmp_dir):
        os.makedirs(tmp_dir)

    # Initialize
    #
    if file_patterns:
        dr = DropboxRetriever(dropbox_link,
                              destination_dir=tmp_dir,
                              file_patterns=file_patterns)
    else:
        dr = DropboxRetriever(dropbox_link,
                              destination_dir=tmp_dir)

    if dr.err_found:
        return (False, dr.err_msg)

    # Get the metadata
    #
    if not dr.step1_retrieve_metadata():
        return (False, dr.err_msg)

    # Does it have what we want?
    #
    if not dr.step2_check_file_matches():
        return (False, dr.err_msg)

    # Yes!
    return (True, dr)
32f23cb372dfdf98567ae16228bdbb95e6934524
74549d7c57b4746ac2a9c275aa12bfc577b0e8af
/hogwartsEmailAdderss.py
245b738aa52c6cc12e18274915c1042e79fc0fa9
[]
no_license
abidkhan484/hackerrank_solution
af9dbf6ec1ead920dc18df233f40db0c867720b4
b0a98e4bdfa71a4671999f16ab313cc5c76a1b7a
refs/heads/master
2022-05-02T11:13:29.447127
2022-04-13T03:02:59
2022-04-13T03:02:59
99,207,401
0
0
null
null
null
null
UTF-8
Python
false
false
354
py
#!/bin/python3

import string


def isValid(email):
    for i in range(5):
        if email[i] not in string.ascii_lowercase:
            return 'No'
    else:
        if email[5:] != '@hogwarts.com':
            return 'No'
    return 'Yes'


if __name__ == "__main__":
    s = input().strip()
    result = isValid(s)
    print(result)
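Quick checks of isValid on invented addresses:

# Toy inputs: five lowercase letters, then exactly '@hogwarts.com'.
print(isValid('harry@hogwarts.com'))   # Yes
print(isValid('Harry@hogwarts.com'))   # No (uppercase in the first five chars)
print(isValid('harry@gmail.com'))      # No (wrong domain suffix)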
f401333e5549b41f09b8c1318936448c3a83d737
98e1716c1c3d071b2fedef0ac029eb410f55762c
/part9-Manipulating-DataFrames-with-pandas/No08-Changing-index-of-a-DataFrame.py
c7f11201fbcc7e64a48481ca0e8a27f8c2375844
[]
no_license
iamashu/Data-Camp-exercise-PythonTrack
564531bcf1dff119949cbb75e1fd63d89cb2779f
c72a4e806494f0e263ced9594597dc8882c2131c
refs/heads/master
2020-07-22T00:23:12.024386
2019-04-12T09:24:42
2019-04-12T09:24:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,477
py
#Changing index of a DataFrame
'''
As you saw in the previous exercise, indexes are immutable objects. This means
that if you want to change or modify the index in a DataFrame, then you need
to change the whole index. You will do this now, using a list comprehension
to create the new index.

A list comprehension is a succinct way to generate a list in one line. For
example, the following list comprehension generates a list that contains the
cubes of all numbers from 0 to 9: cubes = [i**3 for i in range(10)]. This is
equivalent to the following code:

cubes = []
for i in range(10):
    cubes.append(i**3)

Before getting started, print the sales DataFrame in the IPython Shell and
verify that the index is given by month abbreviations containing lowercase
characters.

Instructions
100 XP

Create a list new_idx with the same elements as in sales.index, but with all
characters capitalized.
Assign new_idx to sales.index.
Print the sales dataframe. This has been done for you, so hit 'Submit Answer'
and to see how the index changed.
'''
# Code
# Create the list of new indexes: new_idx
new_idx = [i.upper() for i in sales.index]
# my error: new_idx = [sales.index.upper() for sales.index in sales.index]

# Assign new_idx to sales.index
sales.index = new_idx

# Print the sales DataFrame
print(sales)

'''result
     eggs  salt  spam
JAN    47  12.0    17
FEB   110  50.0    31
MAR   221  89.0    72
APR    77  87.0    20
MAY   132   NaN    52
JUN   205  60.0    55
'''
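The exercise assumes a preloaded sales DataFrame; a self-contained version (with a couple of rows copied from the printed result) runs like this:

# Self-contained version of the exercise; data mirrors the result shown above.
import pandas as pd

sales = pd.DataFrame(
    {'eggs': [47, 110], 'salt': [12.0, 50.0], 'spam': [17, 31]},
    index=['jan', 'feb'])
sales.index = [i.upper() for i in sales.index]
print(sales)  # index becomes JAN, FEB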
3d456ff2fdf7f69d9519317f0a9a47b44322d273
f4b75e06e456dbd065dc57f07d55a2f5ec4ad688
/openstates/data/migrations/0012_person_current_role.py
d9e866c1e3a313f007b32336097bd875c571590a
[ "MIT" ]
permissive
openstates/openstates-core
19bf927a2e72c8808a5601f4454846acaf32218a
3055632ea7ddab6432cc009989ffb437aed6e530
refs/heads/main
2023-09-05T10:30:58.866474
2023-09-01T15:43:59
2023-09-01T15:43:59
251,511,904
19
27
MIT
2023-09-06T19:30:03
2020-03-31T05:47:28
Python
UTF-8
Python
false
false
477
py
# Generated by Django 3.0.5 on 2020-08-04 15:24

import django.contrib.postgres.fields.jsonb
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [("data", "0011_auto_20200804_1108")]

    operations = [
        migrations.AddField(
            model_name="person",
            name="current_role",
            field=django.contrib.postgres.fields.jsonb.JSONField(
                default=None, null=True
            ),
        )
    ]
497e345288a9d28536fdbaf5f67a2102b003849e
7652b3d21519771aa073c4f4a9d66f4f4d5db013
/creating-project/project/project_app/urls.py
de7fffc7ea068fde214f0d92d79c134b3e945a32
[]
no_license
pavkozlov/NETOLOGY-Django-homeworks
9c64cde294590c8a85c5f89fd2190fe989720c84
c331fa10906470c974802932e9d7d7526841f6f1
refs/heads/master
2022-11-27T22:36:12.537296
2019-07-17T16:19:11
2019-07-17T16:19:11
189,250,824
0
1
null
2022-11-22T03:14:37
2019-05-29T15:20:09
Python
UTF-8
Python
false
false
282
py
from django.urls import path
from .views import stations_view
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [
    path('stations/', stations_view, name='stations_view')
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
dcf60d425a75a5583dc890529bb1f1fffe42a262
428ee863e50fecfaedbbf64f3da95e9acb746ae4
/src/tamsin/main.py
a9ea83fa097c8b5749742963afb74886d3b5d15a
[ "BSD-3-Clause", "Unlicense", "LicenseRef-scancode-public-domain" ]
permissive
catseye/Tamsin
ba53a0ee4ac882486a958e6ba7225f19eea763ef
1c9e7ade052d734fa1753d612f2426ac067d5252
refs/heads/master
2021-01-17T09:21:25.202969
2016-03-31T15:00:14
2016-03-31T15:00:14
19,212,331
12
1
null
null
null
null
UTF-8
Python
false
false
4,766
py
# encoding: UTF-8

# Copyright (c)2014 Chris Pressey, Cat's Eye Technologies.
# Distributed under a BSD-style license; see LICENSE for more information.

import os
import subprocess
import sys

from tamsin.buffer import FileBuffer, StringBuffer
from tamsin.event import DebugEventListener
from tamsin.term import Atom
from tamsin.scanner import (
    Scanner, EOF, UTF8ScannerEngine, TamsinScannerEngine
)
from tamsin.parser import Parser
from tamsin.interpreter import Interpreter
from tamsin.desugarer import Desugarer
from tamsin.analyzer import Analyzer
from tamsin.compiler import Compiler
# to be replaced by...
from tamsin.codegen import CodeGen
from tamsin.backends.c import Emitter


def parse(filename):
    with open(filename, 'r') as f:
        scanner = Scanner(
            FileBuffer(f, filename=filename),
            #StringBuffer(f.read(), filename=filename),
            engines=(TamsinScannerEngine(),)
        )
        parser = Parser(scanner)
        ast = parser.grammar()
        desugarer = Desugarer(ast)
        ast = desugarer.desugar(ast)
        return ast


def parse_and_check_args(args):
    ast = None
    for arg in args:
        next_ast = parse(arg)
        if ast is None:
            ast = next_ast
        else:
            ast.incorporate(next_ast)
    analyzer = Analyzer(ast)
    ast = analyzer.analyze(ast)
    return ast


def run(ast, listeners=None):
    scanner = Scanner(
        FileBuffer(sys.stdin, filename='<stdin>'),
        #StringBuffer(sys.stdin.read(), filename='<stdin>'),
        engines=(UTF8ScannerEngine(),),
        listeners=listeners
    )
    interpreter = Interpreter(
        ast, scanner, listeners=listeners
    )
    (succeeded, result) = interpreter.interpret_program(ast)
    if not succeeded:
        sys.stderr.write(str(result) + "\n")
        sys.exit(1)
    print str(result)


def main(args, tamsin_dir='.'):
    listeners = []
    if args[0] == '--debug':
        listeners.append(DebugEventListener())
        args = args[1:]
    if args[0] == 'scan':
        with open(args[1], 'r') as f:
            scanner = Scanner(
                FileBuffer(f, filename=args[1]),
                engines=(TamsinScannerEngine(),),
                listeners=listeners
            )
            tok = None
            while tok is not EOF:
                tok = scanner.scan()
                if tok is not EOF:
                    print Atom(tok).repr()
            print
    elif args[0] == 'parse':
        parser = Parser.for_file(args[1])
        ast = parser.grammar()
        print str(ast)
    elif args[0] == 'desugar':
        parser = Parser.for_file(args[1])
        ast = parser.grammar()
        desugarer = Desugarer(ast)
        ast = desugarer.desugar(ast)
        print str(ast)
    elif args[0] == 'analyze':
        ast = parse_and_check_args(args[1:])
        print str(ast)
    elif args[0] == 'compile':
        ast = parse_and_check_args(args[1:])
        compiler = Compiler(ast, sys.stdout)
        compiler.compile()
    elif args[0] == 'codegen':
        ast = parse_and_check_args(args[1:])
        generator = CodeGen(ast)
        result = generator.generate()
        emitter = Emitter(result, sys.stdout)
        emitter.go()
    elif args[0] == 'doublecompile':
        # http://www.youtube.com/watch?v=6WxJECOFg8w
        ast = parse_and_check_args(args[1:])
        c_filename = 'foo.c'
        exe_filename = './foo'
        with open(c_filename, 'w') as f:
            compiler = Compiler(ast, f)
            compiler.compile()
        c_src_dir = os.path.join(tamsin_dir, 'c_src')
        command = ("gcc", "-g", "-I%s" % c_src_dir, "-L%s" % c_src_dir,
                   c_filename, "-o", exe_filename, "-ltamsin")
        try:
            subprocess.check_call(command)
            exit_code = 0
        except subprocess.CalledProcessError:
            exit_code = 1
        #subprocess.call(('rm', '-f', c_filename))
        sys.exit(exit_code)
    elif args[0] == 'loadngo':
        ast = parse_and_check_args(args[1:])
        c_filename = 'foo.c'
        exe_filename = './foo'
        with open(c_filename, 'w') as f:
            compiler = Compiler(ast, f)
            compiler.compile()
        c_src_dir = os.path.join(tamsin_dir, 'c_src')
        command = ("gcc", "-g", "-I%s" % c_src_dir, "-L%s" % c_src_dir,
                   c_filename, "-o", exe_filename, "-ltamsin")
        try:
            subprocess.check_call(command)
            subprocess.check_call((exe_filename,))
            exit_code = 0
        except subprocess.CalledProcessError:
            exit_code = 1
        subprocess.call(('rm', '-f', c_filename, exe_filename))
        sys.exit(exit_code)
    else:
        ast = parse_and_check_args(args)
        run(ast, listeners=listeners)
5b3e342ade56e396a3dfad0237f974e5082e1bc9
114b61513733083555924fc8ab347335e10471ae
/stackone/stackone/viewModel/MultipartPostHandler.py
df8c4aa526fbfe18ae6a303322624a6199dcffe3
[]
no_license
smarkm/ovm
6e3bea19816affdf919cbd0aa81688e6c56e7565
cd30ad5926f933e6723805d380e57c638ee46bac
refs/heads/master
2021-01-21T04:04:28.637901
2015-08-31T03:05:03
2015-08-31T03:05:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,033
py
#!/usr/bin/python

####
# 02/2006 Will Holcomb <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 7/26/07 Slightly modified by Brian Schneider
# in order to support unicode files ( multipart_encode function )

"""
Usage:
  Enables the use of multipart/form-data for posting forms

Inspirations:
  Upload files in python:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  urllib2_file:
    Fabien Seisen: <[email protected]>

Example:
  import MultipartPostHandler, urllib2, cookielib

  cookies = cookielib.CookieJar()
  opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
                                MultipartPostHandler.MultipartPostHandler)
  params = { "username" : "bob", "password" : "riviera",
             "file" : open("filename", "rb") }
  opener.open("http://wwww.bobsite.com/upload/", params)

Further Example:
  The main function of this file is a sample which downloads a page and
  then uploads it to the W3C validator.
"""

import urllib
import urllib2
import mimetools, mimetypes
import os, stat
from cStringIO import StringIO
import sys

class Callable:
    def __init__(self, anycallable):
        self.__call__ = anycallable

# Controls how sequences are uncoded. If true, elements may be given multiple
# values by assigning a sequence.
doseq = 1

class MultipartPostHandler(urllib2.BaseHandler):
    handler_order = urllib2.HTTPHandler.handler_order - 10  # needs to run first

    def http_request(self, request):
        data = request.get_data()
        if data is not None and type(data) != str:
            v_files = []
            v_vars = []
            try:
                for(key, value) in data.items():
                    if type(value) == file:
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                systype, value, traceback = sys.exc_info()
                raise TypeError, "not a valid non-string sequence or mapping object", traceback

            if len(v_files) == 0:
                data = urllib.urlencode(v_vars, doseq)
            else:
                boundary, data = self.multipart_encode(v_vars, v_files)

                contenttype = 'multipart/form-data; boundary=%s' % boundary
                if(request.has_header('Content-Type')
                   and request.get_header('Content-Type').find('multipart/form-data') != 0):
                    print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
                request.add_unredirected_header('Content-Type', contenttype)

            request.add_data(data)

        return request

    def multipart_encode(vars, files, boundary = None, buf = None):
        if boundary is None:
            boundary = mimetools.choose_boundary()
        if buf is None:
            buf = StringIO()
        for(key, value) in vars:
            buf.write('--%s\r\n' % boundary)
            buf.write('Content-Disposition: form-data; name="%s"' % key)
            buf.write('\r\n\r\n' + value + '\r\n')
        for(key, fd) in files:
            file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
            filename = fd.name.split('/')[-1]
            contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            buf.write('--%s\r\n' % boundary)
            buf.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
            buf.write('Content-Type: %s\r\n' % contenttype)
            # buffer += 'Content-Length: %s\r\n' % file_size
            fd.seek(0)
            buf.write('\r\n' + fd.read() + '\r\n')
        buf.write('--' + boundary + '--\r\n\r\n')
        buf = buf.getvalue()
        return boundary, buf
    multipart_encode = Callable(multipart_encode)

    https_request = http_request

def main():
    import tempfile, sys

    validatorURL = "http://localhost"
    opener = urllib2.build_opener(MultipartPostHandler)

    def validateFile(url):
        temp = tempfile.mkstemp(suffix=".html")
        os.write(temp[0], opener.open(url).read())
        params = { "ss" : "0",            # show source
                   "doctype" : "Inline",
                   "uploaded_file" : open(temp[1], "rb") }
        print opener.open(validatorURL, params).read()
        os.remove(temp[1])

    if len(sys.argv[1:]) > 0:
        for arg in sys.argv[1:]:
            validateFile(arg)
    else:
        validateFile("http://www.google.com")

if __name__=="__main__":
    main()
2332d5c21dfd47be0eab2e6439fbacef32c5aeb3
09e57dd1374713f06b70d7b37a580130d9bbab0d
/data/p3BR/R1/benchmark/startPyquil199.py
b8ba1c63c355402f38a256e26772b3f9cb67ca75
[ "BSD-3-Clause" ]
permissive
UCLA-SEAL/QDiff
ad53650034897abb5941e74539e3aee8edb600ab
d968cbc47fe926b7f88b4adf10490f1edd6f8819
refs/heads/main
2023-08-05T04:52:24.961998
2021-09-19T02:56:16
2021-09-19T02:56:16
405,159,939
2
0
null
null
null
null
UTF-8
Python
false
false
1,775
py
# qubit number=2
# total number=33

import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np

conn = QVMConnection()

def make_circuit() -> Program:
    prog = Program()  # circuit begin
    prog += H(0)  # number=1
    prog += RX(-0.09738937226128368, 2)  # number=2
    prog += H(1)  # number=30
    prog += CZ(2, 1)  # number=31
    prog += H(1)  # number=32
    prog += H(1)  # number=3
    prog += CNOT(1, 0)  # number=4
    prog += Y(1)  # number=15
    prog += CNOT(1, 0)  # number=10
    prog += H(1)  # number=19
    prog += CZ(0, 1)  # number=20
    prog += RX(-0.6000441968356504, 1)  # number=28
    prog += H(1)  # number=21
    prog += CNOT(0, 1)  # number=22
    prog += X(1)  # number=23
    prog += H(2)  # number=29
    prog += CNOT(0, 1)  # number=24
    prog += CNOT(0, 1)  # number=18
    prog += Z(1)  # number=11
    prog += CNOT(1, 0)  # number=12
    prog += CNOT(2, 1)  # number=26
    prog += Y(1)  # number=14
    prog += CNOT(1, 0)  # number=5
    prog += X(1)  # number=6
    prog += Z(1)  # number=8
    prog += X(1)  # number=7
    prog += RX(-2.42845112122491, 1)  # number=25
    # circuit end
    return prog

def summrise_results(bitstrings) -> dict:
    d = {}
    for l in bitstrings:
        if d.get(l) is None:
            d[l] = 1
        else:
            d[l] = d[l] + 1
    return d

if __name__ == '__main__':
    prog = make_circuit()
    qvm = get_qc('1q-qvm')

    results = qvm.run_and_measure(prog, 1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil199.csv", "w")
    print(summrise_results(bitstrings), file=writefile)
    writefile.close()
f427572dcc294f2f278b1dc156e4b0e0c130a115
a4c5a56ed6d3c4299213ff8fd0e4f37719e063ff
/tests/test_override.py
ec0ad1f2bfab03914d5df5c21408b1e52fcbb993
[ "BSD-3-Clause" ]
permissive
pyecore/motra
76add183cf2777bef5916b88e30dd2b3eef8cb06
c0b3e8e54b46572c3bc10bb2b719102e267c371b
refs/heads/main
2023-09-02T12:44:37.688979
2021-10-27T05:53:01
2021-10-27T05:53:01
395,357,398
5
1
null
null
null
null
UTF-8
Python
false
false
965
py
import pytest

import inspect
import pyecore.ecore as ecore
from motra import m2m


@pytest.fixture(scope='module')
def t1():
    # Define a transformation meta-data
    t = m2m.Transformation('t1', inputs=['in_model'], outputs=['in_model'])

    @t.mapping(when=lambda self: self.name.startswith('Egg'))
    def r1(self: ecore.EClass):
        self.name = self.name + '_egg'

    @t.mapping(when=lambda self: self.name.startswith('Spam'))
    def r1(self: ecore.EClass):
        self.name = self.name + '_spam'

    return t, r1


def test__override_with_when(t1):
    t, r1 = t1

    # Fake main for the mapping execution
    result1 = None
    result2 = None

    def fake_main(in_model):
        nonlocal result1
        nonlocal result2
        result1 = r1(ecore.EClass('Spam'))
        result2 = r1(ecore.EClass('Egg'))

    t._main = fake_main
    t.run(in_model=ecore.EPackage())

    assert result1.name == "Spam_spam"
    assert result2.name == "Egg_egg"