blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f860618c91ddd2790cbf9ab5a23363af82c9ded | 1ab7b3f2aa63de8488ce7c466a67d367771aa1f2 | /Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/matplotlib/backends/backend_wxagg.py | 106578e7e14b4f64bcb6da6b5c7266b950eda808 | [
"MIT"
] | permissive | icl-rocketry/Avionics | 9d39aeb11aba11115826fd73357b415026a7adad | 95b7a061eabd6f2b607fba79e007186030f02720 | refs/heads/master | 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 | MIT | 2022-06-27T10:17:06 | 2019-10-19T09:57:07 | C++ | UTF-8 | Python | false | false | 2,916 | py | import wx
from .backend_agg import FigureCanvasAgg
from .backend_wx import (
_BackendWx, _FigureCanvasWxBase, FigureFrameWx,
NavigationToolbar2Wx as NavigationToolbar2WxAgg)
class FigureFrameWxAgg(FigureFrameWx):
    def get_canvas(self, fig):
        """Build the agg-backed canvas hosted by this frame."""
        canvas = FigureCanvasWxAgg(self, -1, fig)
        return canvas
class FigureCanvasWxAgg(FigureCanvasAgg, _FigureCanvasWxBase):
    """
    A wx canvas that renders the figure with the agg rasterizer.

    Event handling and the widget behaviour come from _FigureCanvasWxBase;
    this subclass only converts agg output into the wx bitmap shown on
    screen, either fully (draw) or for a sub-region (blit).
    """

    def draw(self, drawDC=None):
        """
        Render the figure using agg.
        """
        FigureCanvasAgg.draw(self)
        self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
        self._isDrawn = True
        self.gui_repaint(drawDC=drawDC)

    def blit(self, bbox=None):
        # docstring inherited
        if bbox is None:
            # No region given: refresh the whole bitmap.
            self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
            self.gui_repaint()
            return
        # Copy only the bbox region from a freshly rendered bitmap into the
        # one shown on screen.  The y coordinate is flipped relative to the
        # bbox (bitmap height - bbox.y1), matching agg's buffer layout.
        region_x = int(bbox.x0)
        region_y = int(self.bitmap.GetHeight() - bbox.y1)
        region_w = int(bbox.width)
        region_h = int(bbox.height)
        source_dc = wx.MemoryDC()
        source_dc.SelectObject(
            _convert_agg_to_wx_bitmap(self.get_renderer(), None))
        target_dc = wx.MemoryDC()
        target_dc.SelectObject(self.bitmap)
        target_dc.Blit(region_x, region_y, region_w, region_h,
                       source_dc, region_x, region_y)
        target_dc.SelectObject(wx.NullBitmap)
        source_dc.SelectObject(wx.NullBitmap)
        self.gui_repaint()
def _convert_agg_to_wx_bitmap(agg, bbox):
    """
    Convert the region of the agg buffer bounded by *bbox* to a wx.Bitmap.
    If *bbox* is None, the entire buffer is converted.

    Note: *agg* must be a backend_agg.RendererAgg instance.
    """
    width = int(agg.width)
    height = int(agg.height)
    # agg => rgba buffer -> full-size bitmap
    full_bitmap = wx.Bitmap.FromBufferRGBA(width, height, agg.buffer_rgba())
    if bbox is None:
        return full_bitmap
    # Clip the requested region out of the full bitmap via two memory DCs.
    clip_w = int(bbox.width)
    clip_h = int(bbox.height)
    source_dc = wx.MemoryDC()
    source_dc.SelectObject(full_bitmap)
    clipped = wx.Bitmap(clip_w, clip_h)
    target_dc = wx.MemoryDC()
    target_dc.SelectObject(clipped)
    # y is flipped relative to the bbox (height - bbox.y1), as in draw/blit.
    target_dc.Blit(0, 0, clip_w, clip_h, source_dc,
                   int(bbox.x0), int(height - bbox.y1))
    source_dc.SelectObject(wx.NullBitmap)
    target_dc.SelectObject(wx.NullBitmap)
    return clipped
@_BackendWx.export
class _BackendWxAgg(_BackendWx):
    # Backend registration: reuse the wx machinery, but swap in the
    # agg-rendering canvas and frame classes defined above.
    FigureCanvas = FigureCanvasWxAgg
    _frame_class = FigureFrameWxAgg
| [
"[email protected]"
] | |
6eb5d6f21a0f8cb5ea2bf73c210ca1f46ca447bf | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /large_case.py | 277a782fa6b80b12aba0b6d2cd3f37a72636cccd | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py |
#! /usr/bin/env python
def problem_or_right_thing(str_arg):
    """Echo *str_arg* via world(), then print the literal marker 'number'."""
    world(str_arg)
    print('number')
def world(str_arg):
    """Print the supplied string unchanged."""
    print(str_arg)
if __name__ == '__main__':
    # Demo invocation: prints the argument string, then 'number'.
    problem_or_right_thing('see_same_problem_from_government')
| [
"[email protected]"
] | |
f9899a02fbb389cfb24430cb2d5568571f7d1eee | 53c91272444bfab92e7e89e0358047b27eab1125 | /03.代码/豆瓣评论/scrapydouban/scrapydouban/main.py | 14e98c156d8afabc0ee0c2f3618ec95177b648b0 | [] | no_license | MrFiona/python_module_summary | 2bbf9f30e0fbfe302e7e6c429754fa7bf4bfc411 | 4e36f6f5f6abed10fc06b16b0ed7c12cde7746d0 | refs/heads/master | 2021-01-20T03:54:38.105298 | 2019-01-07T07:28:36 | 2019-01-07T07:28:36 | 101,373,212 | 2 | 0 | null | 2018-04-15T05:56:45 | 2017-08-25T06:28:52 | Jupyter Notebook | UTF-8 | Python | false | false | 231 | py | #!/user/bin/python
#-*- coding:utf-8 -*-
'''
@author: 创客▪榕
@contact: [email protected]
@file: main.py
@time: 2017/5/15 15:01
'''
from scrapy import cmdline
# Launch the DoubanBooksDetail spider programmatically, exactly as if
# `scrapy crawl DoubanBooksDetail` had been typed on the command line.
cmdline.execute('scrapy crawl DoubanBooksDetail'.split())
| [
"[email protected]"
] | |
95a6fd239a4a0467a1839ba2bd9e0c8e5ff51381 | d31991e464835225abd17340b41b409d180ff639 | /noetikon/files/managers.py | e2cf975193736189a24c10c04ed0e067db568a8b | [
"MIT"
] | permissive | webkom/noetikon | c6de7dd2c4cffc84ae4746561ac1da8019eda1f5 | 0fcede2d63a79b51bc29ea4b62d9cbc4ba993180 | refs/heads/master | 2021-01-16T23:57:31.425562 | 2016-09-12T18:20:37 | 2016-09-12T18:20:37 | 29,366,121 | 4 | 0 | null | 2017-03-01T14:51:59 | 2015-01-16T20:17:19 | Python | UTF-8 | Python | false | false | 782 | py | from basis.managers import PersistentModelManager
from django.db.models import Q
class DirectoryManager(PersistentModelManager):
    def permitted(self, user):
        """Directories *user* may access; superusers get everything."""
        if user.is_superuser:
            return self
        # Start from an always-false filter and OR in each access grant.
        access_q = Q(id=-1)
        access_q |= Q(users_with_access=user)
        for group in user.groups.all():
            access_q |= Q(groups_with_access=group)
        return self.filter(access_q).distinct()
class FileManager(PersistentModelManager):
    def permitted(self, user):
        """Files *user* may access via their parent folder; superusers get
        everything."""
        if user.is_superuser:
            return self
        # Start from an always-false filter and OR in each access grant.
        access_q = Q(id=-1)
        access_q |= Q(parent_folder__users_with_access=user)
        for group in user.groups.all():
            access_q |= Q(parent_folder__groups_with_access=group)
        return self.filter(access_q).distinct()
| [
"[email protected]"
] | |
d2c049e4b584b0d9ea9fe5ab855eaf54a61e1407 | 6de622e922361beac91e3cfc4cd67829451bc095 | /wyzepal/integrations/irc/irc-mirror.py | 3201a3ce7e0af7f254b1668150be83d0bdc59548 | [] | no_license | WyzePal/api | fd1f1771aa9e1bfeb5d5de102b3f525d905fae29 | 8646c90148885b1c4286557bd62cfcf844b9d107 | refs/heads/master | 2020-03-23T15:25:53.559240 | 2019-03-08T23:54:00 | 2019-03-08T23:54:00 | 141,747,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | #!/usr/bin/env python
#
# EXPERIMENTAL
# IRC <=> WyzePal mirroring bot
#
# Setup: First, you need to install python-irc version 8.5.3
# (https://github.com/jaraco/irc)
from __future__ import print_function
import argparse
import wyzepal
import sys
import traceback
if False:
    # Never executed at runtime: Python-2-era guard so static type checkers
    # can see the typing names without importing `typing` at run time.
    from typing import Any, Dict
# CLI help text passed to argparse below (shown on bad/missing arguments).
usage = """./irc-mirror.py --irc-server=IRC_SERVER --channel=<CHANNEL> --nick-prefix=<NICK> [optional args]
Example:
./irc-mirror.py --irc-server=127.0.0.1 --channel='#test' --nick-prefix=username
Specify your WyzePal API credentials and server in a ~/.wyzepalrc file or using the options.
Note that "_wyzepal" will be automatically appended to the IRC nick provided
Also note that at present you need to edit this code to do the WyzePal => IRC side
"""
if __name__ == "__main__":
    # Build the CLI: WyzePal's default options plus the IRC-specific ones.
    arg_parser = argparse.ArgumentParser(usage=usage)
    parser = wyzepal.add_default_arguments(arg_parser, allow_provisioning=True)
    parser.add_argument('--irc-server', default=None)
    parser.add_argument('--port', default=6667)
    parser.add_argument('--nick-prefix', default=None)
    parser.add_argument('--channel', default=None)
    opts = parser.parse_args()
    # Setting the client to irc_mirror is critical for this to work
    opts.client = "irc_mirror"
    wyzepal_client = wyzepal.init_from_options(opts)
    try:
        from irc_mirror_backend import IRCBot
    except ImportError as e:
        traceback.print_exc()
        print("You have unsatisfied dependencies. Install all missing dependencies with "
              "{} --provision".format(sys.argv[0]))
        sys.exit(1)
    if opts.irc_server is None or opts.nick_prefix is None or opts.channel is None:
        parser.error("Missing required argument")
    # "_wyzepal" is appended to the provided prefix to form the IRC nick.
    irc_nick = opts.nick_prefix + "_wyzepal"
    mirror_bot = IRCBot(wyzepal_client, opts.channel, irc_nick,
                        opts.irc_server, opts.port)
    mirror_bot.start()
| [
"[email protected]"
] | |
d3a121fca276e1c24ca96cb517a01a0a8faf1b75 | 633b695a03e789f6aa644c7bec7280367a9252a8 | /samplepy/6-03_student_card.py | 123c709227e82e443dfc704cb4af4d119033367a | [] | no_license | tnakaicode/PlotGallery | 3d831d3245a4a51e87f48bd2053b5ef82cf66b87 | 5c01e5d6e2425dbd17593cb5ecc973982f491732 | refs/heads/master | 2023-08-16T22:54:38.416509 | 2023-08-03T04:23:21 | 2023-08-03T04:23:21 | 238,610,688 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | class StudentCard:
def __init__(self):
print('初期化メソッド内の処理です')
self.id = 0
self.name = '未定'
a = StudentCard()
b = StudentCard()
# Each instance gets its own independent id/name attributes.
print(f'a.id:{a.id}, a.name:{a.name}')
print(f'b.id:{b.id}, b.name:{b.name}')
"[email protected]"
] | |
c82d7fe7d81d9549ba5139768d173c9cd11899a2 | a99e86146150aae97cd36311c3a90d95c272125a | /config.py | d36f3783d1440ab2f467cb67b9645c96fa0176eb | [] | no_license | mutaihillary/userapp | 017ac68124e72b559ddb5a1e81f60fd0006ffb30 | 0da93766967c37e7c203e995765321eecdd3ac7e | refs/heads/master | 2021-01-19T01:05:43.369086 | 2016-08-19T05:45:40 | 2016-08-19T05:45:40 | 65,550,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | from flask_sqlalchemy import SQLAlchemy
from flask import Flask
import os
# Absolute path of the directory containing this module; used to anchor the
# SQLite database file next to the code.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
# SQLite database stored at <basedir>/userapp.db.
app.config['SQLALCHEMY_DATABASE_URI'] ='sqlite:///' + os.path.join(basedir, 'userapp.db')
# NOTE(review): modification tracking adds overhead and is usually disabled;
# confirm the app actually relies on these signals before keeping True.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# NOTE(review): COMMIT_ON_TEARDOWN is deprecated in newer Flask-SQLAlchemy
# releases — verify the installed version still honours it.
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
"[email protected]"
] | |
161381b413380795aca8c929268f18b4c212f395 | b5d87f3fbe5ae84522c9391040a51145966ed226 | /yelp/basic_python/client.py | a407ecbde4dc0b02fc7be7b2e20205d64bfbbe52 | [] | no_license | oliverhuangchao/algorithm | f8b17743436c7d2e92b0761deafbf6af93ef922f | 858885bc2b6b7070b5536695214c915106d56f8c | refs/heads/master | 2021-01-10T05:54:41.181112 | 2015-07-09T19:55:04 | 2015-07-09T19:55:04 | 36,044,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | import socket
# Minimal TCP client demo (Python 2 syntax): connect to a server on this
# same host at port 12345, print its first message, then disconnect.
s = socket.socket()
host = socket.gethostname()  # server is assumed to run on the local machine
port = 12345
s.connect((host,port))
print s.recv(1024)  # read at most 1024 bytes from the server and echo them
s.close()
| [
"[email protected]"
] | |
64d29f78ae1643a4169e7455dbbc3beeb67c6dbd | 676f6f2d02db6aeeaa1bb0b28ab49e8c73923d0e | /venv/Lib/site-packages/neuralcoref/utils.py | 333ad28bbab947b38a586275274aca661ffe68f6 | [
"Apache-2.0"
] | permissive | vrian/orsen | ce34f74ea3a14c95d37ffa5c694b7c66725925df | 9c10148aba62868fad4b679a4b9b717829586e96 | refs/heads/master | 2023-01-21T21:47:06.210918 | 2018-06-23T04:46:26 | 2018-06-23T04:46:26 | 120,284,869 | 1 | 0 | Apache-2.0 | 2023-01-09T09:39:16 | 2018-02-05T09:44:03 | Python | UTF-8 | Python | false | false | 2,946 | py | # coding: utf8
"""Utils"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
from tqdm import tqdm
# Bin index for each distance 0..63: 0-4 map to themselves, then bins of
# width 3, 8, 16 and 32.
DISTANCE_BINS = list(range(5)) + [5] * 3 + [6] * 8 + [7] * 16 + [8] * 32


def encode_distance(x):
    """Encode an integer, or an array of integers, as a (binned) one-hot
    numpy array of length 11 (slot 9 flags "far", slot 10 carries the
    clipped, scaled raw distance)."""
    def _one_hot(d):
        vect = np.zeros((11,))
        if d < 64:
            vect[DISTANCE_BINS[d]] = 1
        else:
            vect[9] = 1
            vect[10] = min(float(d), 64.0) / 64.0
        return vect

    if isinstance(x, np.ndarray):
        # One encoded row per element, stacked along a new leading axis.
        return np.concatenate([_one_hot(d)[np.newaxis, :] for d in x])
    return _one_hot(x)
def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=10):
    """
    A parallel version of the map function with a progress bar.
    Args:
        array (array-like): An array to iterate over.
        function (function): A python function to apply to the elements of array
        n_jobs (int, default=16): The number of cores to use
        use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
            keyword arguments to function
        front_num (int, default=10): The number of iterations to run serially before kicking off the parallel job.
            Useful for catching bugs
    Returns:
        [function(array[0]), function(array[1]), ...]
    """
    # Run the first few iterations serially to catch bugs early.  This is
    # unconditional: with front_num == 0 the slice is empty and so is the
    # list.  (The original guarded this with `if front_num > 0`, leaving
    # `front` unassigned — and the final return raising NameError — whenever
    # front_num was 0.)
    front = [function(**a) if use_kwargs else function(a)
             for a in array[:front_num]]
    # If we set n_jobs to 1, just run a list comprehension. This is useful
    # for benchmarking and debugging.
    if n_jobs == 1:
        return front + [function(**a) if use_kwargs else function(a)
                        for a in tqdm(array[front_num:])]
    # Assemble the workers and submit the remaining elements.
    with ThreadPoolExecutor(max_workers=n_jobs) as pool:
        if use_kwargs:
            futures = [pool.submit(function, **a) for a in array[front_num:]]
        else:
            futures = [pool.submit(function, a) for a in array[front_num:]]
        kwargs = {
            'total': len(futures),
            'unit': 'it',
            'unit_scale': True,
            'leave': True
        }
        # Tick the progress bar as tasks complete (completion order, not
        # submission order).
        for _ in tqdm(as_completed(futures), **kwargs):
            pass
    out = []
    # Collect results in submission order; a failed task contributes its
    # exception object instead of a value (best-effort semantics).
    for future in tqdm(futures):
        try:
            out.append(future.result())
        except Exception as e:
            out.append(e)
    return front + out
| [
"[email protected]"
] | |
3b383bca73c7c19fda1fe4eea52bb0918a5d55c5 | 16cb8cc18d92d4018f9ee3044565cf22d4daef70 | /Lab0/Python/7_Loops.py | 0e44f466e1bfeb11cff40ae4017307015a92b838 | [] | no_license | zx-joe/Computational-Motor-Control-for-Salamandar-Robot | c13ac105d73b283ac86c00a00a7b25b28e3713af | c66d23fb8365e4b12263bb4115a30d708d42dbb2 | refs/heads/master | 2022-12-12T12:23:57.573980 | 2020-09-08T09:05:28 | 2020-09-08T09:05:28 | 256,481,679 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | #!/usr/bin/env python3
"""This script introduces you to the usage of Loops in Python.
Loops are useful to repeatedly to do a task over and over again.
Here we look at for and while loops in particular"""
import farms_pylog as pylog # import farms_pylog for log messages
### FOR LOOPS AND WHILE LOOPS ###
# Banner announcing this section of the tutorial.
pylog.info(3*'\t' + 20*'#' + ' FOR AND WHILE LOOPS ' + 20*'#' + 3*'\n')
# range returns a list of integers (Python 2) or a sequence (Python 3)
# returns [0, 1, 2]: includes start value but excludes stop value
pylog.info('Using range method between 0 and 3 {}'.format(
    list(range(0, 3))))
pylog.info('A very useful method for iteration')
pylog.warning('Includes start value but excludes the stop values')
list(range(3)) # equivalent: default start value is 0
list(range(0, 5, 2)) # returns [0, 2, 4]: third argument is the step value
# Python 2 only: use xrange to create a sequence rather than a list (saves
# memory)
list(range(100, 100000, 5))  # result discarded — demonstration only
# for loop (not the recommended style)
fruits = ['apple', 'banana', 'cherry']
pylog.warning('Not a Recommended style')
for i in range(len(fruits)):
    pylog.info((fruits[i].upper()))
# for loop (recommended style)
pylog.warning('Recommended style')
for fruit in fruits:
    pylog.info((fruit.upper()))
# iterate through two things at once (using tuple unpacking)
family = {'dad': 'homer', 'mom': 'marge', 'size': 6}
pylog.info('Iterating over two things at once :')
for key, value in list(family.items()):
    pylog.info((key, value))
# use enumerate if you need to access the index value within the loop
pylog.info('Indexing the list')
for index, fruit in enumerate(fruits):
    pylog.info((index, fruit))
# for/else loop
for fruit in fruits:
    if fruit == 'banana':
        pylog.info('Found the banana!')
        break # exit the loop and skip the 'else' block
else:
    # this block executes ONLY if the for loop completes without hitting
    # 'break'
    pylog.info("Can't find the banana")
# while loop
count = 0
while count < 5:
    pylog.info('This will print 5 times')
    count += 1 # equivalent to 'count = count + 1'
| [
"[email protected]"
] | |
c5b9bd010de9df17ce44d3ced4ccf69cf11a0deb | 2071325c958baeccf009fd63803d459b809ec435 | /tadh/index.py | d59a7b735c6e3205ca9be63cac5931a1f68b0441 | [] | no_license | timtadh/codegolf | fd18eccaadf1a9d6c5c93026d28bee6914993268 | 434bc3fdc3881a993184ce54042b074b134ce440 | refs/heads/master | 2021-01-18T10:53:51.397524 | 2012-04-24T06:16:34 | 2012-04-24T06:16:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,510 | py | import collections, os, re, sys, json, timeit, time
from tst.suffix import SuffixTree
from tst import TST
# Stopwords excluded from the index ('my' is listed twice; the set dedupes).
stop = {'a', 'an', 'the', 'their', 'my', 'me', 'mine', 'my', 'i', 'am', 'but',
    'is', "isn't", 'was', "wasn't"}
# Module-level state: word -> set(review ids), review id -> stored review,
# and query string -> list of matching review ids.
index = SuffixTree()
reviews = dict()
results = dict()
def clean(text):
    """Lower-case *text*, strip the characters /():.,; entirely, and pad
    '?', '!' and '-' so they become separate whitespace-delimited tokens."""
    char_map = {
        '/': '', '(': '', ')': '', ':': '', '.': '', ',': '', ';': '',
        '?': ' ?', '!': ' !', '-': ' - ',
    }
    lowered = text.lower()
    # Single pass over the string; equivalent to the chained str.replace
    # calls since no replacement re-introduces another mapped character.
    return ''.join(char_map.get(ch, ch) for ch in lowered)
def index_review(revid, review):
    """Index one review: remember its cleaned text under *revid*, and register
    the review id in the suffix index under every distinct non-stopword."""
    revid = revid.strip()
    text = clean(review.strip().lower())
    # Store the review id alongside its text.  (The original stored the
    # builtin ``id`` function here by mistake; only the text element is ever
    # read by the callers in this file.)
    reviews[revid] = (revid, text)
    for word in set(text.split()) - stop:
        revs = index.get(word, set())
        if not revs:
            # First occurrence of this word: register the fresh set.
            revs.add(revid)
            index[word] = revs
        else:
            # Existing entry: mutate the set already stored in the index.
            revs.add(revid)
def mkindex(fname):
    """Build the in-memory index from a file of '<review id>:<review>' lines
    (Python 2 script; ``print`` statements are progress output)."""
    print fname
    with open(fname, 'r') as f:
        for i, line in enumerate(f):
            #if i > 100: break
            if i % 100 == 0:
                # Progress marker every 100 reviews.
                print i
                sys.stdout.flush()
            # Split only on the first ':' — review text may contain colons.
            revid, review = line.split(':', 1)
            index_review(revid, review)
def query(*substrs):
ssres = [re.compile('.{0,35}%s.{0,35}'%substr.replace('?', '\?')) for substr in substrs]
def f_index():
for substr in substrs:
list(index.find(substr))
def f_brute():
for substr in substrs:
[text.find(substr) for id, text in reviews.values()]
#import pdb
#pdb.set_trace()
#print timeit.timeit(f_index, number=10)
#print timeit.timeit(f_brute, number=10)
sets = [set() for substr in substrs]
for i,substr in enumerate(substrs):
for word, revids in index.find(substr):
sets[i] |= revids
revids = sets[0]
for rvs in sets[1:]:
revids &= rvs
revids = [revid.decode('utf8') for revid in revids]
results[' '.join(substrs).decode('utf8')] = revids
print json.dumps(revids)
def main():
    """Build the index from the file named on the command line, then answer
    whitespace-separated substring queries read from stdin (Python 2)."""
    mkindex(sys.argv[1])
    print len(reviews)
    #sys.stderr.write('repeater.py: starting\n')
    sys.stdout.flush()
    while True:
        sys.stdout.write('> '); sys.stdout.flush()
        # NOTE(review): the bare except treats ANY read error (including
        # KeyboardInterrupt) as end-of-input; readline returning '' at EOF
        # only hits `continue`, so EOF appears to spin rather than exit —
        # confirm intended behaviour.
        try: inpt = sys.stdin.readline()
        except: break;
        #if inpt is None: break;
        if not inpt:
            continue
        inpt = clean(inpt)
        #sys.stdout.write(inpt)
        #sys.stdout.flush()
        inpt = inpt.split()
        query(*inpt)
        sys.stdout.flush()
        time.sleep(1)
    print 'finished'
    #print >>sys.stderr, results
    # Persist all accumulated query results on exit.
    with open('results.json', 'w') as f:
        json.dump(results, f)
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
42836521a575f8c077b4bfebb8d8e2419be572af | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4051/527004051.py | 8c9efc8520e73dc4035dfeae8878f979b3e8fff1 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,610 | py | from bots.botsconfig import *
from records004051 import recorddefs
# EDI X12 envelope settings for this transaction set (bots grammar).
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'MD',
}
# Segment hierarchy: {ID: segment tag, MIN/MAX: occurrence bounds,
# LEVEL: nested child segments}.
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BR', MIN: 1, MAX: 1},
    {ID: 'G62', MIN: 0, MAX: 5},
    {ID: 'NTE', MIN: 0, MAX: 5},
    {ID: 'LM', MIN: 0, MAX: 50, LEVEL: [
        {ID: 'LQ', MIN: 1, MAX: 100},
    ]},
    {ID: 'N1', MIN: 1, MAX: 20, LEVEL: [
        {ID: 'N2', MIN: 0, MAX: 2},
        {ID: 'N3', MIN: 0, MAX: 2},
        {ID: 'N4', MIN: 0, MAX: 1},
        {ID: 'G61', MIN: 0, MAX: 5},
    ]},
    {ID: 'LIN', MIN: 1, MAX: 99999, LEVEL: [
        {ID: 'CS', MIN: 0, MAX: 1},
        {ID: 'N9', MIN: 0, MAX: 10},
        {ID: 'RCD', MIN: 1, MAX: 99999, LEVEL: [
            {ID: 'G62', MIN: 0, MAX: 10},
            {ID: 'GF', MIN: 0, MAX: 1},
            {ID: 'DD', MIN: 0, MAX: 100},
            {ID: 'N9', MIN: 0, MAX: 5},
            {ID: 'AMT', MIN: 0, MAX: 1},
            {ID: 'NTE', MIN: 0, MAX: 5},
            {ID: 'G66', MIN: 0, MAX: 5},
            {ID: 'LM', MIN: 0, MAX: 25, LEVEL: [
                {ID: 'LQ', MIN: 1, MAX: 100},
            ]},
            {ID: 'CS', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'PO4', MIN: 0, MAX: 1},
                {ID: 'N9', MIN: 0, MAX: 5},
                {ID: 'G62', MIN: 0, MAX: 5},
                {ID: 'G69', MIN: 0, MAX: 5},
                {ID: 'LM', MIN: 0, MAX: 25, LEVEL: [
                    {ID: 'LQ', MIN: 1, MAX: 100},
                ]},
            ]},
            {ID: 'N1', MIN: 0, MAX: 25, LEVEL: [
                {ID: 'N2', MIN: 0, MAX: 2},
                {ID: 'N3', MIN: 0, MAX: 2},
                {ID: 'N4', MIN: 0, MAX: 1},
                {ID: 'G61', MIN: 0, MAX: 1},
            ]},
            {ID: 'REF', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'G62', MIN: 0, MAX: 10},
                {ID: 'N9', MIN: 0, MAX: 99999},
                {ID: 'N1', MIN: 0, MAX: 1},
                {ID: 'LM', MIN: 0, MAX: 50, LEVEL: [
                    {ID: 'LQ', MIN: 1, MAX: 100},
                ]},
            ]},
            {ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'N1', MIN: 0, MAX: 1},
                {ID: 'LM', MIN: 0, MAX: 100, LEVEL: [
                    {ID: 'LQ', MIN: 1, MAX: 100},
                ]},
            ]},
            {ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'FA2', MIN: 1, MAX: 99999},
            ]},
        ]},
    ]},
    {ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
5eda38602c9897aa34ee18f8b6a594549d4df83c | 4719f3ef5a0d40c4426a4ac8c9307fc4631b8eea | /tests/test_borda.py | 36de3d7b168b9e3a7a42fd7fafbb598650240d4c | [
"MIT"
] | permissive | ozcan-durak/elsim | 08104b9c8820e412d93e9cc91b5e0179151cbec5 | 3e0e53adc1579ba1ab9c429d05d772dad2c6eb5b | refs/heads/master | 2022-12-05T13:23:23.200159 | 2020-08-18T01:09:52 | 2020-08-18T04:25:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,681 | py | import random
import numpy as np
import pytest
from hypothesis import given
from hypothesis.strategies import integers, lists, permutations
from elsim.methods import borda
def collect_random_results(method, election):
    """
    Run multiple elections with tiebreaker='random' and collect the set of all
    winners seen over ten trials.
    """
    random.seed(47)  # Deterministic test
    seen = set()
    for _ in range(10):
        w = method(election, tiebreaker='random')
        assert isinstance(w, int)
        seen.add(w)
    return seen
@pytest.mark.parametrize("tiebreaker", [None, 'random', 'order'])
def test_basic(tiebreaker):
    """Check borda() against published worked examples (no ties involved,
    so every tiebreaker mode must give the same winner)."""
    # Standard Tennessee example
    # https://en.wikipedia.org/wiki/Template:Tenn_voting_example
    Memphis, Nashville, Chattanooga, Knoxville = 0, 1, 2, 3
    election = [*42*[[Memphis, Nashville, Chattanooga, Knoxville]],
                *26*[[Nashville, Chattanooga, Knoxville, Memphis]],
                *15*[[Chattanooga, Knoxville, Nashville, Memphis]],
                *17*[[Knoxville, Chattanooga, Nashville, Memphis]],
                ]
    assert borda(election, tiebreaker) == Nashville
    # Example from Ques 9
    # http://www.yorku.ca/bucovets/4380/exercises/exercises_1_a.pdf
    v, w, x, y, z = 0, 1, 2, 3, 4
    election = [*11*[[v, w, x, y, z]],
                *12*[[w, x, y, z, v]],
                *13*[[x, v, w, y, z]],
                *14*[[y, w, v, z, x]],
                *15*[[z, v, x, w, y]],
                ]
    assert borda(election, tiebreaker) == w
    # Manually calculated correct answer
    election = [[0, 1, 4, 3, 2],
                [4, 2, 3, 1, 0],
                [4, 2, 3, 1, 0],
                [3, 2, 1, 4, 0],
                [2, 0, 3, 1, 4],
                [3, 2, 1, 4, 0],
                ]
    assert borda(election, tiebreaker) == 2
    # Example from
    # https://www3.nd.edu/~apilking/math10170/information/Lectures/Lecture-2.Borda%20Method.pdf
    K, H, R = 0, 1, 2
    election = [*2*[[K, H, R]],
                *3*[[H, R, K]],
                *2*[[H, K, R]],
                *3*[[R, H, K]],
                ]
    assert borda(election, tiebreaker) == H
    # Example from
    # http://jlmartin.faculty.ku.edu/~jlmartin/courses/math105-F11/Lectures/chapter1-part2.pdf
    A, B, C, D = 0, 1, 2, 3
    election = [*14*[[A, B, C, D]],
                *10*[[C, B, D, A]],
                * 8*[[D, C, B, A]],
                * 4*[[B, D, C, A]],
                * 1*[[C, D, B, A]],
                ]
    assert borda(election, tiebreaker) == B
    election = [*60*[[A, B, C, D]],
                *40*[[B, D, C, A]],
                ]
    assert borda(election, tiebreaker) == B
    # Table 3.1 from Mackie - Democracy Defended
    A, B, C, D, E = 0, 1, 2, 3, 4
    election = [*4*[[A, E, D, C, B]],
                *3*[[B, C, E, D, A]],
                *2*[[C, D, E, B, A]],
                ]
    assert borda(election, tiebreaker) == E # "to E the Borda winner"
    # Example from
    # https://medium.com/@t2ee6ydscv/how-ranked-choice-voting-elects-extremists-fa101b7ffb8e
    r, b, g, o, y = 0, 1, 2, 3, 4
    election = [*31*[[r, b, g, o, y]],
                * 5*[[b, r, g, o, y]],
                * 8*[[b, g, r, o, y]],
                * 1*[[b, g, o, r, y]],
                * 6*[[g, b, o, r, y]],
                * 1*[[g, b, o, y, r]],
                * 6*[[g, o, b, y, r]],
                * 2*[[o, g, b, y, r]],
                * 5*[[o, g, y, b, r]],
                * 7*[[o, y, g, b, r]],
                *28*[[y, o, g, b, r]],
                ]
    assert borda(election) == g
def test_ties():
    """Tied elections: None yields no winner, 'order' picks the lowest ID,
    'random' eventually picks every tied candidate."""
    # Two-way tie between candidates 1 and 2
    election = np.array([[0, 1, 2],
                         [0, 2, 1],
                         [1, 2, 0],
                         [1, 2, 0],
                         [1, 2, 0],
                         [2, 1, 0],
                         [2, 1, 0],
                         [2, 1, 0],
                         ])
    # No tiebreaker:
    assert borda(election, tiebreaker=None) is None
    # Mode 'order' should always prefer lowest candidate ID
    assert borda(election, tiebreaker='order') == 1
    # Mode 'random' should choose all tied candidates at random
    assert collect_random_results(borda, election) == {1, 2}
    # Three-way tie between 0, 1, and 2
    election = np.array([[0, 1, 2],
                         [0, 1, 2],
                         [0, 1, 2],
                         [1, 2, 0],
                         [1, 2, 0],
                         [1, 2, 0],
                         [2, 0, 1],
                         [2, 0, 1],
                         [2, 0, 1],
                         ])
    # No tiebreaker:
    assert borda(election, tiebreaker=None) is None
    # Mode 'order' should always prefer lowest candidate ID
    assert borda(election, tiebreaker='order') == 0
    # Mode 'random' should choose all tied candidates at random
    assert collect_random_results(borda, election) == {0, 1, 2}
def complete_ranked_ballots(min_cands=3, max_cands=25, min_voters=1,
                            max_voters=100):
    """Hypothesis strategy: an election as a list of complete ranked ballots
    (each ballot is a permutation of the candidate IDs)."""
    def _ballots(n):
        return lists(permutations(range(n)),
                     min_size=min_voters, max_size=max_voters)

    n_cands = integers(min_value=min_cands, max_value=max_cands)
    return n_cands.flatmap(_ballots)
@pytest.mark.parametrize("tiebreaker", ['random', 'order'])
@given(election=complete_ranked_ballots(min_cands=1, max_cands=25,
                                        min_voters=1, max_voters=100))
def test_legit_winner(election, tiebreaker):
    """Property: with a tiebreaker, borda always returns a valid candidate ID."""
    election = np.asarray(election)
    n_cands = election.shape[1]
    winner = borda(election, tiebreaker)
    assert isinstance(winner, int)
    assert winner in range(n_cands)
@given(election=complete_ranked_ballots(min_cands=1, max_cands=25,
                                        min_voters=1, max_voters=100))
def test_legit_winner_none(election):
    """Property: without a tiebreaker, borda returns a valid ID or None."""
    election = np.asarray(election)
    n_cands = election.shape[1]
    winner = borda(election)
    assert isinstance(winner, (int, type(None)))
    assert winner in set(range(n_cands)) | {None}
if __name__ == "__main__":
    # Run unit tests, in separate process to avoid warnings about cached
    # modules, printing output line by line in realtime
    from subprocess import Popen, PIPE
    with Popen(['pytest',
                '--tb=short', # shorter traceback format
                '--hypothesis-show-statistics',
                str(__file__)], stdout=PIPE, bufsize=1,
               universal_newlines=True) as p:
        for line in p.stdout:
            print(line, end='')
| [
"[email protected]"
] | |
8303d30c8032f5f4d4810c52bb87dece4b05a65d | 08a1d871f4be9ea61497751845a5ed9abe2a1012 | /farbox_bucket/utils/cli_color.py | 5819c5e25a1ad977f067ed1c0eba565f31526df8 | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | itfanr/FarBox | 5bfff706439a6a223f531cfa36100ac21ed4878b | daeda4f5080467f1ddf4b60424b8562f914756bd | refs/heads/master | 2023-04-19T07:23:28.824231 | 2021-05-07T02:29:06 | 2021-05-07T02:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | #coding: utf8
from __future__ import absolute_import, print_function
#CSI="\x1B["
#RED = CSI+"31;40m"
#GREEN = CSI+'32;40m'
#RESET =CSI+"m"
# ANSI/VT100 escape sequences keyed by a human-readable name.
FLAGS = {
    'RESET': "\x1B[0m",
    'BOLD': "\x1B[1m",
    'DIM': "\x1B[2m",
    'UNDER': "\x1B[4m",
    'REVERSE': "\x1B[7m",
    'HIDE': "\x1B[8m",
    'CLEARSCREEN': "\x1B[2J",
    'CLEARLINE': "\x1B[2K",
    'BLACK': "\x1B[30m",
    'RED': "\x1B[31m",
    'GREEN': "\x1B[32m",
    'YELLOW': "\x1B[33m",
    'BLUE': "\x1B[34m",
    'MAGENTA': "\x1B[35m",
    'CYAN': "\x1B[36m",
    'WHITE': "\x1B[37m",
    'BBLACK': "\x1B[40m",
    'BRED': "\x1B[41m",
    'BGREEN': "\x1B[42m",
    'BYELLOW': "\x1B[43m",
    'BBLUE': "\x1B[44m",
    'BMAGENTA': "\x1B[45m",
    'BCYAN': "\x1B[46m",
    'BWHITE': "\x1B[47m",
    'NEWLINE': "\r\n\x1B[0m",
}


def print_with_color(strings, color='red', end='\r\n'):
    """Print *strings* wrapped in the escape code named *color*
    (case-insensitive); unknown names fall back to a plain print with the
    default newline."""
    code = FLAGS.get(color.upper())
    if code:
        print(code + strings + FLAGS['RESET'], end=end)
    else:
        print(strings)
def print_colorful_parts(string_parts, end=''):
    """Print each (text, color) pair in sequence, then emit the NEWLINE
    reset sequence."""
    for text, color in string_parts:
        print_with_color(text, color, end)
    print(FLAGS['NEWLINE'], end='')
if __name__ == '__main__':
    # Demo: two colored greetings on one line, then a multi-colored sequence.
    print_with_color('hello', 'green', end=' ')
    print_with_color('hello', 'blue')
    demo_parts = [
        ('hello', 'magenta'),
        ('world', 'yellow'),
        ('hello', 'red'),
        ('world', 'cyan'),
    ]
    print_colorful_parts(demo_parts, end=' ')
| [
"[email protected]"
] | |
701af4a6dea98585d23863e6340949745d1980e6 | 73758dde83d1a1823c103e1a4ba71e7c95168f71 | /nsd1912/py02/day02/myfunc.py | 684e88105205263137758b3b560f5844088f2eac | [] | no_license | tonggh220/md_5_nsd_notes | 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | a58a021ad4c7fbdf7df327424dc518f4044c5116 | refs/heads/master | 2023-07-02T01:34:38.798929 | 2021-05-12T08:48:40 | 2021-05-12T08:48:40 | 393,885,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | def func1(x):
    if x == 1:  # base case: 1! == 1
        return 1
    # Recursive step: n! == n * (n-1)!
    return x * func1(x- 1)
# Expansion trace for func1(5):
# 5 * func1(4)
# 5 * 4 * func1(3)
# 5 * 4 * 3 * func1(2)
# 5 * 4 * 3 * 2 * func1(1)
# 5 * 4 * 3 * 2 * 1
if __name__ == '__main__':
    print(func1(5))  # 5! == 120
| [
"[email protected]"
] | |
2c3ddf59ef5bbc9b91706cc3b505f3e28ba85471 | 1ade02a8e0c6d7e442c9d9041f15518d22da3923 | /w8/mock_phase2/run/core/controllers/generic.py | 4478b8a5012a51fa7a71dcc21a18532f1804d5fc | [] | no_license | fodisi/ByteAcademy-Bootcamp | 7980b80636a36db6da3e0fc0e529fbc6b8e097e0 | d53e3f4864f6cba1b85e806c29b01c48e3c2e81d | refs/heads/master | 2020-03-19T12:55:31.489638 | 2018-07-25T16:19:19 | 2018-07-25T16:19:19 | 136,550,128 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | #!/usr/bin/env python3
from flask import Blueprint, render_template
controller = Blueprint('generic', __name__)


@controller.route('/<product_name>')
def home(product_name):
    """Render the index page for ``product_name``.

    The original body referenced ``obj`` even though the line creating it
    was commented out, so every request raised ``NameError``.  Until the
    model lookup is restored, pass the raw product name to the template.
    """
    # obj = model.get_product(product_name)  # TODO: re-enable once `model` is importable
    return render_template('index.html', json_obj=product_name)
| [
"[email protected]"
] | |
8da80ee62fb2a9c9ee57874efa1a8a69dc421479 | 694d57c3e512ce916269411b51adef23532420cd | /leetcode/23merge_k_sorted_lists.py | 76d8e6ea698631516ea4cfdd1e87899d4de8cc45 | [] | no_license | clovery410/mycode | 5541c3a99962d7949832a0859f18819f118edfba | e12025e754547d18d5bb50a9dbe5e725fd03fd9c | refs/heads/master | 2021-05-16T02:46:47.996748 | 2017-05-10T23:43:50 | 2017-05-10T23:43:50 | 39,235,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | import heapq
class ListNode(object):
    # Singly linked list node (LeetCode scaffolding).
    def __init__(self, x):
        self.val = x   # payload value
        self.next = None  # next node in the list, or None at the tail
class Solution(object):
    def mergeKLists(self, lists):
        """Merge k sorted linked lists into one sorted list and return its head.

        A min-heap holds (value, tie, node) triples; the monotonically
        increasing ``tie`` counter guarantees the heap never compares two
        nodes directly.  (The original pushed (value, node) pairs, which
        raises TypeError on equal values in Python 3 because list nodes
        define no ordering.)
        """
        new_head = None
        heap = []
        tie = 0  # strictly increasing tie-breaker for equal values
        for node in lists:
            if node:
                heapq.heappush(heap, (node.val, tie, node))
                tie += 1
        if heap:
            new_head = heapq.heappop(heap)[2]
            if new_head.next:
                heapq.heappush(heap, (new_head.next.val, tie, new_head.next))
                tie += 1
            pre_node = new_head
            while heap:
                curr_node = heapq.heappop(heap)[2]
                # Each popped node is replaced by its successor, keeping one
                # pending node per source list in the heap.
                if curr_node.next:
                    heapq.heappush(heap, (curr_node.next.val, tie, curr_node.next))
                    tie += 1
                pre_node.next = curr_node
                pre_node = curr_node
        return new_head
if __name__ == '__main__':
    # Build two sorted lists, 1->3->5 and 2->4->6, and merge them
    # (Python 2 script: uses print statements).
    node1 = ListNode(1)
    node2 = ListNode(2)
    node3 = ListNode(3)
    node4 = ListNode(4)
    node5 = ListNode(5)
    node6 = ListNode(6)
    node1.next = node3
    node3.next = node5
    node2.next = node4
    node4.next = node6
    sol = Solution()
    new_head = sol.mergeKLists([node1, node2])
    # Expect 1, 2, 3, 4, 5, 6 in order.
    print new_head.val
    print new_head.next.val
    print new_head.next.next.val
    print new_head.next.next.next.val
    print new_head.next.next.next.next.val
    print new_head.next.next.next.next.next.val
    # [[]] contains no truthy nodes, so the merge yields None.
    print sol.mergeKLists([[]])
| [
"[email protected]"
] | |
515c154e112ed44885fb11d8cfbd74d1c10c102d | 1b070c5fabfe7e804eac4c7d706f6ccdf6b29ed0 | /partners/migrations/0005_auto_20200620_0044.py | bfe6abd78feb964304d45149cd068cdcdae946a8 | [
"MIT"
] | permissive | cZachJohnson/My-Business | ef80dae6458c2fb7a08465d29e32f9405e52e43d | 792bb13a5b296260e5de7e03fba6445a13922851 | refs/heads/master | 2023-08-25T06:51:39.330270 | 2021-10-25T01:20:34 | 2021-10-25T01:20:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # Generated by Django 2.2.12 on 2020-06-20 00:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add created_at / updated_at timestamps to the partner model.

    Auto-generated by Django; existing rows get the epoch (1970-01-01)
    as a one-off created_at default, hence preserve_default=False.
    """

    dependencies = [
        ("partners", "0004_auto_20200609_0514"),
    ]

    operations = [
        migrations.AddField(
            model_name="partner",
            name="created_at",
            # auto_now_add stamps new rows on insert; the epoch default is
            # only used to backfill rows that already exist.
            field=models.DateTimeField(
                auto_now_add=True, default=datetime.datetime(1970, 1, 1, 0, 0)
            ),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="partner",
            name="updated_at",
            # auto_now re-stamps the row on every save().
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"[email protected]"
] | |
6347debad11c0a6b40a7f94a3ab778d780943f36 | 24a88b7dd4d81763fd4212a42c4a73f4c35f8ffc | /apiREST/api/serializers.py | f92010912774eedc0733526c21deca79cd4e444b | [] | no_license | junkluis/leccionMVC | d001c122318dde065ffd9a88aaaad0b7b4533a05 | c311e69f2ae6d102651f9f7e6fc1f9750fc9e4bc | refs/heads/master | 2021-01-15T19:27:48.875724 | 2017-08-14T14:35:29 | 2017-08-14T14:35:29 | 99,823,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from rest_framework import serializers
from .models import *
class ticketSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the public fields of the ``ticket`` model."""
    class Meta:
        model = ticket
        # Fields serialized to/from the REST API payload.
        fields = ('fechaEmision', 'Precio', 'Adquiriente', 'Puesto', 'Origen', 'Destino')
"[email protected]"
] | |
adf34bdb17df662959d03197aa497d4f9a4eccc1 | 9bbf429d2c2e2f20345d613a719cf01e8f9a0bff | /project/settings.py | 6c1e92b7cc86c64ef10a85cf6336b520a2f2d545 | [] | no_license | sandglasscao/ENU | f78f8a8dfaf3263587885b0622ab6d3182012375 | e3c26fd57f8ef582da576e1cc28b7eb42562c706 | refs/heads/master | 2021-01-23T05:19:03.175439 | 2017-04-14T09:24:22 | 2017-04-14T09:24:22 | 86,297,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,673 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import socket
import datetime
# Project root: two directories above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# True when running on OpenShift PaaS (detected via its environment).
ON_PAAS = 'OPENSHIFT_REPO_DIR' in os.environ
if ON_PAAS:
    SECRET_KEY = os.environ['OPENSHIFT_SECRET_TOKEN']
else:
    # SECURITY WARNING: keep the secret key used in production secret!
    SECRET_KEY = ')_7av^!cy(wfx=k#3*7x+(=j^fzv+ot^1@sh9s9t=8$bu@r(z$'
# SECURITY WARNING: don't run with debug turned on in production!
# adjust to turn off when on Openshift, but allow an environment variable to override on PAAS
DEBUG = not ON_PAAS
DEBUG = DEBUG or os.getenv("debug", "false").lower() == "true"
if ON_PAAS and DEBUG:
    print("*** Warning - Debug mode is on ***")
# Accept any Host header; the PaaS routing layer fronts this app.
ALLOWED_HOSTS = ['*']
# Application definition
# Installed applications: Django contrib apps, DRF, crispy-forms, and the
# project's own apps (userprofile, metadata, utility).
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.sites',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'crispy_forms',
    'userprofile',
    'metadata',
    'utility',
)
# Pre-Django-1.10 middleware setting (order matters: sessions before auth,
# locale last so it can use the session).
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.middleware.locale.LocaleMiddleware',
)
ROOT_URLCONF = 'project.urls'
# Template engine: project-level templates/ dir plus per-app template dirs.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates'),
                 ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.static',
                'django.template.context_processors.i18n',
            ],
        },
    },
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database selection: on OpenShift, pick PostgreSQL or MySQL from whichever
# cartridge's environment variables are present; locally, use a MySQL
# instance configured through plain environment variables.
if ON_PAAS:
    # determine if we are on MySQL or POSTGRESQL
    if "OPENSHIFT_POSTGRESQL_DB_USERNAME" in os.environ:
        DATABASES = {
            'default': {
                'ENGINE': 'django.db.backends.postgresql_psycopg2',
                'NAME': os.environ['OPENSHIFT_APP_NAME'],
                'USER': os.environ['OPENSHIFT_POSTGRESQL_DB_USERNAME'],
                'PASSWORD': os.environ['OPENSHIFT_POSTGRESQL_DB_PASSWORD'],
                'HOST': os.environ['OPENSHIFT_POSTGRESQL_DB_HOST'],
                'PORT': os.environ['OPENSHIFT_POSTGRESQL_DB_PORT'],
            }
        }
    elif "OPENSHIFT_MYSQL_DB_USERNAME" in os.environ:
        DATABASES = {
            'default': {
                'ENGINE': 'django.db.backends.mysql',
                'NAME': os.environ['OPENSHIFT_APP_NAME'],
                'USER': os.environ['OPENSHIFT_MYSQL_DB_USERNAME'],
                'PASSWORD': os.environ['OPENSHIFT_MYSQL_DB_PASSWORD'],
                'HOST': os.environ['OPENSHIFT_MYSQL_DB_HOST'],
                'PORT': os.environ['OPENSHIFT_MYSQL_DB_PORT'],
            }
        }
else:
    # The triple-quoted string below is dead, commented-out configuration
    # kept for reference (sqlite and OpenShift-style MySQL variants).
    '''
    # stock django, local development.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': os.environ['OPENSHIFT_APP_NAME'],
            'USER': os.environ['OPENSHIFT_MYSQL_DB_USERNAME'],
            'PASSWORD': os.environ['OPENSHIFT_MYSQL_DB_PASSWORD'],
            'HOST': os.environ['OPENSHIFT_MYSQL_DB_HOST'],
            'PORT': os.environ['OPENSHIFT_MYSQL_DB_PORT'],
        }
    }
    '''
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': os.environ['APP_NAME2'],
            'USER': os.environ['MYSQL_DB_USERNAME'],
            'PASSWORD': os.environ['MYSQL_DB_PASSWORD'],
            'HOST': os.environ['MYSQL_DB_HOST'],
            'PORT': os.environ['MYSQL_DB_PORT'],
        }
    }
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-hans'
# TIME_ZONE = 'America/Sao_Paulo'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'wsgi', 'static')  # STATIC_ROOT is just for production env to collect all static resources
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(BASE_DIR, "static"),
)
# User-uploaded media: persistent data dir on the PaaS, static/media locally.
if ON_PAAS:
    MEDIA_ROOT = os.path.join(os.environ.get('OPENSHIFT_DATA_DIR'), 'media')
    MEDIA_URL = '/static/media/'
else:
    #MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media')
    MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
    MEDIA_URL = STATIC_URL + 'media/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
AUTH_PROFILE_MODULE = 'userprofile.Profile'
# Console-only logging: Django errors plus DEBUG-level output from the
# project's 'case' and 'page' loggers.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # 'file': {
        #     'level': 'INFO',
        #     'class': 'logging.FileHandler',
        #     'filename': 'bluepage.log',
        #     'formatter': 'verbose'
        # },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'propagate': True,
            'level': 'ERROR',
        },
        'case': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
        'page': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    }
}
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ]
}
# JWT tokens expire after 30000 seconds (~8.3 hours).
JWT_AUTH = {
    'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=30000),
}
| [
"root@localhost"
] | root@localhost |
3231911515adfd0365eae0b7ab08f656f1a18ce5 | 134c429df7d5c4d067d9761cb1435992b048adaf | /notes/0431/0431.py | 11bc6ce8611f6db618e1efae727ca798d3c25e41 | [] | no_license | PaulGuo5/Leetcode-notes | 65c6ebb61201d6f16386062e4627291afdf2342d | 431b763bf3019bac7c08619d7ffef37e638940e8 | refs/heads/master | 2021-06-23T09:02:58.143862 | 2021-02-26T01:35:15 | 2021-02-26T01:35:15 | 177,007,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | """
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
"""
class Codec:
    """Encode an N-ary tree as a binary tree and back (LeetCode 431).

    Uses the left-child / right-sibling mapping: a binary node's left
    child is the N-ary node's first child, and the chain of right
    pointers starting there carries the remaining siblings.
    """

    # Encodes an n-ary tree to a binary tree.
    def encode(self, root: 'Node') -> TreeNode:
        if not root:
            return None
        encoded = TreeNode(root.val)
        if not root.children:
            return encoded
        # First child hangs off the left pointer...
        encoded.left = self.encode(root.children[0])
        # ...and every later sibling chains along the right pointers.
        cursor = encoded.left
        for sibling in root.children[1:]:
            cursor.right = self.encode(sibling)
            cursor = cursor.right
        return encoded

    # Decodes your binary tree to an n-ary tree.
    def decode(self, data: TreeNode) -> 'Node':
        if not data:
            return None
        decoded = Node(data.val, [])
        # Walk the right-sibling chain of the left child to recover the
        # original children list in order.
        child = data.left
        while child:
            decoded.children.append(self.decode(child))
            child = child.right
        return decoded
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(root))
| [
"[email protected]"
] | |
6ca88a0dbb97f37b2015940e3978efcb9c8a9f0b | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/132160/kaggle-ndsb-master/configurations/[email protected] | 15acaee9e4b2e5862cb947b90c641219ff778759 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,904 | py | import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta
# Pretrained weights to warm-start from (placeholder substituted by the
# experiment runner) and the train/validation split to use.
pre_init_path = "CONVROLL4_MODEL_FILE"
validation_split_path = "splits/bagging_split_27.pkl"
# Network input crop size (height, width) in pixels.
patch_size = (95, 95)
# Random augmentation ranges applied to each training patch.
augmentation_params = {
    'zoom_range': (1 / 1.6, 1.6),
    'rotation_range': (0, 360),
    'shear_range': (-20, 20),
    'translation_range': (-10, 10),
    'do_flip': True,
    'allow_stretch': 1.3,
}
# Quarter-size batches/chunks (original values divided by 4).
batch_size = 128 // 4
chunk_size = 32768 // 4
num_chunks_train = 580
momentum = 0.9
# Step decay: chunk index -> learning rate.
learning_rate_schedule = {
    0: 0.003,
    420: 0.0003,
    540: 0.00003,
}
validate_every = 20
save_every = 20
def estimate_scale(img):
    # Rescale factor relative to a reference size of 85 px on the longer
    # image side.
    height, width = img.shape[0], img.shape[1]
    return np.maximum(height, width) / 85.0
# augmentation_transforms_test = []
# for flip in [True, False]:
#     for zoom in [1/1.3, 1/1.2, 1/1.1, 1.0, 1.1, 1.2, 1.3]:
#         for rot in np.linspace(0.0, 360.0, 5, endpoint=False):
#             tf = data.build_augmentation_transform(zoom=(zoom, zoom), rotation=rot, flip=flip)
#             augmentation_transforms_test.append(tf)
# Test-time augmentation: 70 quasi-random transforms whose predictions are
# averaged (replaces the grid of transforms commented out above).
augmentation_transforms_test = tta.build_quasirandom_transforms(70, **{
    'zoom_range': (1 / 1.4, 1.4),
    'rotation_range': (0, 360),
    'shear_range': (-10, 10),
    'translation_range': (-8, 8),
    'do_flip': True,
    'allow_stretch': 1.2,
})
# Zero-mean unit-variance loader that rescales each image by estimate_scale.
data_loader = load.ZmuvRescaledDataLoader(estimate_scale=estimate_scale, num_chunks_train=num_chunks_train,
    patch_size=patch_size, chunk_size=chunk_size, augmentation_params=augmentation_params,
    augmentation_transforms_test=augmentation_transforms_test, validation_split_path=validation_split_path)
# Conv2DLayer = nn.layers.cuda_convnet.Conv2DCCLayer
# MaxPool2DLayer = nn.layers.cuda_convnet.MaxPool2DCCLayer
# cuDNN-backed convolution and pooling implementations.
Conv2DLayer = tmp_dnn.Conv2DDNNLayer
MaxPool2DLayer = tmp_dnn.MaxPool2DDNNLayer
def build_model():
    """Build the cyclic-rolling convnet for the NDSB plankton task.

    Returns (input_layers, output_layer, resume_layer, exclude_layer);
    the resume/exclude layers (both l2) mark where pretrained weights
    from pre_init_path stop being loaded.
    """
    l0 = nn.layers.InputLayer((batch_size, 1, patch_size[0], patch_size[1]))
    # Slice the input into the 4 cyclic rotations (processed as a bigger batch).
    l0c = dihedral.CyclicSliceLayer(l0)
    l1a = Conv2DLayer(l0c, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
    l1b = Conv2DLayer(l1a, num_filters=16, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
    l1 = MaxPool2DLayer(l1b, ds=(3, 3), strides=(2, 2))
    # Roll feature maps between the 4 orientations so they share information.
    l1r = dihedral.CyclicConvRollLayer(l1)
    l2a = Conv2DLayer(l1r, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
    l2b = Conv2DLayer(l2a, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
    l2 = MaxPool2DLayer(l2b, ds=(3, 3), strides=(2, 2))
    l2r = dihedral.CyclicConvRollLayer(l2)
    l3a = Conv2DLayer(l2r, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l3b = Conv2DLayer(l3a, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l3c = Conv2DLayer(l3b, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l3 = MaxPool2DLayer(l3c, ds=(3, 3), strides=(2, 2))
    l3r = dihedral.CyclicConvRollLayer(l3)
    l4a = Conv2DLayer(l3r, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l4b = Conv2DLayer(l4a, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l4c = Conv2DLayer(l4b, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l4 = MaxPool2DLayer(l4c, ds=(3, 3), strides=(2, 2))
    l4r = dihedral.CyclicConvRollLayer(l4)
    l5a = Conv2DLayer(l4r, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l5b = Conv2DLayer(l5a, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l5c = Conv2DLayer(l5b, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l5 = MaxPool2DLayer(l5c, ds=(3, 3), strides=(2, 2))
    l5r = dihedral.CyclicConvRollLayer(l5)
    l5f = nn.layers.flatten(l5r)
    l6 = nn.layers.DenseLayer(nn.layers.dropout(l5f, p=0.5), num_units=256, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
    l6r = dihedral.CyclicRollLayer(l6)
    l7 = nn.layers.DenseLayer(nn.layers.dropout(l6r, p=0.5), num_units=256, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
    # Pool the 4 orientations back into one prediction via RMS.
    l7m = dihedral.CyclicPoolLayer(l7, pool_function=nn_plankton.rms)
    l8 = nn.layers.DenseLayer(nn.layers.dropout(l7m, p=0.5), num_units=data.num_classes, nonlinearity=T.nnet.softmax, W=nn_plankton.Orthogonal(1.0))
    l_resume = l2
    l_exclude = l2
    return [l0], l8, l_resume, l_exclude
| [
"[email protected]"
] | |
6ceb9d1a80663a73976e941ebaa5c6143e75a5ce | c7e9ec5ce6627f6f68bab1b86a27a4516595154d | /consentrecords/migrations/0089_auto_20180123_2226.py | 738533784e45a55c62c2542c2229561d9a774b5b | [] | no_license | michaelcrubenstein/consentrecords | 7b79e82c9ad4b5efcfbb44a50ff1d4cadf7180e2 | 992fe78c68d1d5c083f9e2cc0e3e9aa24363b93d | refs/heads/master | 2021-01-23T19:28:13.807809 | 2018-07-03T16:10:34 | 2018-07-03T16:10:34 | 41,223,029 | 1 | 1 | null | 2018-07-03T16:10:35 | 2015-08-22T20:21:26 | JavaScript | UTF-8 | Python | false | false | 700 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-01-23 22:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('consentrecords', '0088_auto_20171227_2107'),
]
operations = [
migrations.RemoveField(
model_name='experience',
name='timeframe',
),
migrations.RemoveField(
model_name='experiencehistory',
name='timeframe',
),
migrations.AlterField(
model_name='experience',
name='era',
field=models.IntegerField(db_index=True, null=True),
),
]
| [
"[email protected]"
] | |
1010a87e378223fd5275560d3cf5ea3eb1d65f07 | 017f02454cbb5616a9aa23e3ce76f84832378ec2 | /inferencia/task/pose_estimation/pose_estimation_2d/visualization/pose_estimation_2d_visualizer_factory.py | 451d1b7a92d7c0e6f4d200bafdc3e7bc2a2a02e1 | [
"Apache-2.0"
] | permissive | hampen2929/inferencia | a6e0f0b25abe95c3690ddfc7a225d4a4bdc2cb10 | c83563ff31d47cd441bb8ac3072df32a7fded0ee | refs/heads/main | 2023-08-18T13:53:32.882725 | 2021-09-18T12:15:52 | 2021-09-18T12:15:52 | 379,299,225 | 0 | 2 | Apache-2.0 | 2021-09-18T12:15:53 | 2021-06-22T14:30:36 | Jupyter Notebook | UTF-8 | Python | false | false | 813 | py | from .pose_estimation_2d_visualizer_name import PoseEstimation2DVisualizerName
from ..label.pose_estimation_2d_label_factory import PoseEstimation2DLabelFactory
class PoseEstimation2DVisualizerFactory():
    """Factory mapping a visualizer name to a concrete 2D-pose visualizer.

    ``create`` is a static factory: the original definition had no ``self``
    parameter and no ``@staticmethod`` decorator, so calling it on an
    instance would have bound the instance to ``visualizer_name``. The
    decorator fixes that while keeping the original
    ``PoseEstimation2DVisualizerFactory.create(...)`` call style working.
    """

    @staticmethod
    def create(visualizer_name="PoseVisualizer",
               label_name="COCOKeyPointLabel"):
        """Instantiate the visualizer registered under ``visualizer_name``.

        Args:
            visualizer_name: key from ``PoseEstimation2DVisualizerName``.
            label_name: keypoint label set used to look up body edges.

        Returns:
            A ``PoseVilualizer`` configured with the label's body edges.

        Raises:
            NotImplementedError: if ``visualizer_name`` is unknown.
        """
        if visualizer_name == PoseEstimation2DVisualizerName.pose_visualizer.value:
            # Imported lazily so unused visualizer backends are never loaded.
            from .visualization.pose_vilualizer import PoseVilualizer
            pose_label = PoseEstimation2DLabelFactory.create(
                label_name=label_name)
            pose_visualizer = PoseVilualizer(body_edges=pose_label.body_edges)
            return pose_visualizer
        else:
            msg = "{} is not implemented".format(
                visualizer_name)
            raise NotImplementedError(msg)
| [
"[email protected]"
] | |
f9fade661402627c5a7c12936bed53fbd8d25454 | 50e90ce3870a66395aa2f4abd6a03c7e7de811e6 | /mishamovie/pipeline_steps/combine_frames.py | f92b81a9ca8a11193857ab424569e84310ab838d | [] | no_license | jgc128/mishamovie | c8dd960756d1cef7504d6060d1d7ceca89869c91 | ce8864fb311bff4c0936046a7efe121b5e5f8a3b | refs/heads/main | 2023-06-08T21:28:30.605756 | 2021-07-04T20:59:32 | 2021-07-04T20:59:32 | 361,487,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | """
This script trains a simple model Cats and Dogs dataset and saves it in the SavedModel format
"""
import argparse
import subprocess
def parse_args(argv=None):
    """Parse command-line options for the frame-combining step.

    Args:
        argv: Optional list of argument strings. ``None`` (the default)
            falls back to ``sys.argv[1:]``, preserving the original
            behaviour while making the parser unit-testable.

    Returns:
        argparse.Namespace with input_dir, output_dir, output_filename,
        input_name_template and fps attributes.
    """
    # Description fixed: this script combines frames into a video
    # (the old text was copy-pasted from the frame-splitting step).
    parser = argparse.ArgumentParser(description='Combine frames into a video')

    parser.add_argument('--input_dir', help='Input dir', required=True)
    parser.add_argument('--output_dir', help='Output dir', required=True)
    parser.add_argument('--output_filename', help='Output file name', required=True)
    # printf-style template matched by ffmpeg against the frame files.
    parser.add_argument('--input_name_template', default='frame_%5d.png', help='Input name template', required=False)
    parser.add_argument('--fps', default=30, type=int, help='Frames per second', required=False)

    return parser.parse_args(argv)
def main():
# TODO: fmpeg -i face_close_aged_long.mp4 -filter "minterpolate='fps=120'" zzzz3.mp4
# https://superuser.com/a/1185430
# https://github.com/dthpham/butterflow
args = parse_args()
print(args)
input_filename = f'{args.input_dir}/{args.input_name_template}'
output_filename = f'{args.output_dir}/{args.output_filename}'
cmd = [
'ffmpeg', '-y', '-framerate', str(args.fps), '-i', input_filename,
'-c:v', 'libx264', '-vf', f'fps={args.fps},pad=ceil(iw/2)*2:ceil(ih/2)*2', '-pix_fmt', 'yuv420p',
output_filename
]
subprocess.run(cmd, check=True)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9112a8d3de06671acf5e9fded056dc27c0c8b4a3 | 55726b4940cec0e9df9ba90ab69b27b81c283740 | /DjangoBlog/admin_site.py | d90ec5e4ee94b395af46df4d557eb97ac712a51a | [
"MIT"
] | permissive | zlaiyyf/DjangoBlog | fe655c62f74e929cd874d095cc2f8bf48739bd0d | ccb67c9f08a9b6b8ca65828fece34cda89135187 | refs/heads/master | 2022-12-27T05:06:27.578712 | 2020-10-11T07:47:46 | 2020-10-11T07:47:46 | 264,558,604 | 1 | 0 | MIT | 2020-05-17T01:09:08 | 2020-05-17T01:09:07 | null | UTF-8 | Python | false | false | 2,014 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: [email protected]
@site: https://www.lylinux.net/
@software: PyCharm
@file: admin_site.py
@time: 2018/1/7 上午2:21
"""
from django.contrib.admin import AdminSite
from DjangoBlog.utils import get_current_site
from django.contrib.sites.admin import SiteAdmin
from django.contrib.admin.models import LogEntry
from django.contrib.sites.models import Site
from DjangoBlog.logentryadmin import LogEntryAdmin
from blog.admin import *
from accounts.admin import *
from oauth.admin import *
from servermanager.admin import *
from comments.admin import *
from owntracks.admin import *
class DjangoBlogAdminSite(AdminSite):
    """Custom admin site for DjangoBlog, restricted to superusers."""
    site_header = 'DjangoBlog administration'
    site_title = 'DjangoBlog site admin'

    def __init__(self, name='admin'):
        super().__init__(name)

    def has_permission(self, request):
        # Only superusers may enter; the stock AdminSite also admits
        # active staff users.
        return request.user.is_superuser

    # def get_urls(self):
    #     urls = super().get_urls()
    #     from django.urls import path
    #     from blog.views import refresh_memcache
    #
    #     my_urls = [
    #         path('refresh/', self.admin_view(refresh_memcache), name="refresh"),
    #     ]
    #     return urls + my_urls
# Single shared admin-site instance; every app's models are registered here
# instead of on the default django.contrib.admin.site.
admin_site = DjangoBlogAdminSite(name='admin')
admin_site.register(Article, ArticlelAdmin)
admin_site.register(Category, CategoryAdmin)
admin_site.register(Tag, TagAdmin)
admin_site.register(Links, LinksAdmin)
admin_site.register(SideBar, SideBarAdmin)
admin_site.register(BlogSettings, BlogSettingsAdmin)
admin_site.register(commands, CommandsAdmin)
admin_site.register(EmailSendLog, EmailSendLogAdmin)
admin_site.register(BlogUser, BlogUserAdmin)
admin_site.register(Comment, CommentAdmin)
admin_site.register(OAuthUser, OAuthUserAdmin)
admin_site.register(OAuthConfig, OAuthConfigAdmin)
admin_site.register(OwnTrackLog, OwnTrackLogsAdmin)
# Contrib models re-registered so they appear on this custom site too.
admin_site.register(Site, SiteAdmin)
admin_site.register(LogEntry, LogEntryAdmin)
| [
"[email protected]"
] | |
d6ae857b950288a40f300ca27e242e60df1df9a0 | 4f807eb45da63a633f32425908a4acf19462d96f | /python/src/yatzy/YatzyTest.py | 0b2bf06d8f872a60d16e9e5cdc496e24a2794c6a | [] | no_license | mebusw/Test-Driven-Development | 580c3f31ee0f406afa8d7761bf82acd67cfcd166 | 7a49f2615a78a1cedbb909e60e0232e5e1467287 | refs/heads/master | 2021-01-22T06:32:07.448971 | 2017-02-02T04:56:40 | 2017-02-02T04:56:40 | 5,836,643 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | """
The game of yatzy is a simple dice game. Each player
rolls five six-sided dice. The player places the roll in
a category, such as ones, twos, fives, pair, two pairs
etc (see below).
If the roll is compatible with the category, the player
gets a score for the roll according to the rules. If the
roll is not compatible with the category, the player scores
zero for the roll.
For example, if a player rolls 5,6,5,5,2 and scores the
dice in the fives category they would score 15 (three fives).
Your task is to score a GIVEN roll in a GIVEN category.
You do NOT have to program the random dice rolling.
You do NOT have to program re-rolls (as in the real game).
You do NOT play by letting the computer choose the highest
scoring category for a given roll.
Yatzy Categories and Scoring Rules
==================================
Chance:
The player scores the sum of all dice,
no matter what they read.
For example,
1,1,3,3,6 placed on "chance" scores 14 (1+1+3+3+6)
4,5,5,6,1 placed on "chance" scores 21 (4+5+5+6+1)
Yatzy:
If all dice have the same number,
the player scores 50 points.
For example,
1,1,1,1,1 placed on "yatzy" scores 50
5,5,5,5,5 placed on "yatzy" scores 50
1,1,1,2,1 placed on "yatzy" scores 0
Ones, Twos, Threes, Fours, Fives, Sixes:
The player scores the sum of the dice that reads one,
two, three, four, five or six, respectively.
For example,
1,1,2,4,4 placed on "fours" scores 8 (4+4)
2,3,2,5,1 placed on "twos" scores 4 (2+2)
3,3,3,4,5 placed on "ones" scores 0
Pair:
If exactly two dice have the same value then
the player scores the sum of the two highest matching dice.
For example, when placed on "pair"
3,3,3,4,4 scores 8 (4+4)
1,1,6,2,6 scores 12 (6+6)
3,3,3,4,1 scores 0
3,3,3,3,1 scores 0
Two pairs:
If exactly two dice have the same value and exactly
two dice have a different value then the
player scores the sum of these four dice.
For example, when placed on "two pairs"
1,1,2,3,3 scores 8 (1+1+3+3)
1,1,2,3,4 scores 0
1,1,2,2,2 scores 0
Three of a kind:
If there are exactly three dice with the same number
then the player scores the sum of these dice.
For example, when placed on "three of a kind"
3,3,3,4,5 scores 9 (3+3+3)
3,3,4,5,6 scores 0
3,3,3,3,1 scores 0
Four of a kind:
If there are exactly four dice with the same number
then the player scores the sum of these dice.
For example, when placed on "four of a kind"
2,2,2,2,5 scores 8 (2+2+2+2)
2,2,2,5,5 scores 0
2,2,2,2,2 scores 0
Small straight:
When placed on "small straight", if the dice read
1,2,3,4,5, the player scores 15 (the sum of all the dice).
Large straight:
When placed on "large straight", if the dice read
2,3,4,5,6, the player scores 20 (the sum of all the dice).
Full house:
If the dice are two of a kind and three of a different kind
then the player scores the sum of all five dice.
For example, when placed on "full house"
1,1,2,2,2 scores 8 (1+1+2+2+2)
2,2,3,3,4 scores 0
4,4,4,4,4 scores 0
"""
import unittest
from itertools import groupby
class Game(object):
    """Scores a roll of five dice for the various yatzy categories."""

    @staticmethod
    def chance(*dice):
        """Chance is simply the sum of all dice."""
        return sum(dice)

    @staticmethod
    def yatzy(*dice):
        """Five of a kind scores a flat 50 points; anything else scores 0."""
        first = dice[0]
        for die in dice:
            if die != first:
                return 0
        return 50

    @staticmethod
    def _single(n):
        # Build a scorer that sums only the dice showing the number n.
        def score(*dice):
            return n * dice.count(n)
        return score

    def __init__(self):
        # One counting category per face value, ones through sixes.
        self.sixes = Game._single(6)
        self.fives = Game._single(5)
        self.fours = Game._single(4)
        self.threes = Game._single(3)
        self.twos = Game._single(2)
        self.ones = Game._single(1)

    def _buckets(self, dice):
        # Group equal dice into (value, multiplicity) pairs, ordered by
        # multiplicity descending with higher values first on ties.
        counts = []
        for value, run in groupby(sorted(dice, reverse=True)):
            counts.append((value, len(list(run))))
        counts.sort(key=lambda pair: pair[1], reverse=True)
        return counts

    def pair(self, *dice):
        # Highest value that appears exactly twice; 0 if none does.
        for value, times in self._buckets(dice):
            if times == 2:
                return value * 2
        return 0

    def two_pairs(self, *dice):
        groups = self._buckets(dice)
        if groups[0][1] == 2 and groups[1][1] == 2:
            return 2 * (groups[0][0] + groups[1][0])
        return 0

    def three_kind(self, *dice):
        # Exactly three of a kind; four or five of the same face scores 0.
        best = self._buckets(dice)[0]
        return best[0] * 3 if best[1] == 3 else 0

    def four_kind(self, *dice):
        # Exactly four of a kind; five of the same face scores 0.
        best = self._buckets(dice)[0]
        return best[0] * 4 if best[1] == 4 else 0

    def straight(self, *dice):
        # Every die must be exactly one more than its predecessor.
        for prev, cur in zip(dice, dice[1:]):
            if cur != prev + 1:
                return 0
        return sum(dice)

    def full_house(self, *dice):
        groups = self._buckets(dice)
        if groups[0][1] == 3 and groups[1][1] == 2:
            return sum(dice)
        return 0
class YatzyTest(unittest.TestCase):
    """Unit tests covering every yatzy scoring category.

    Fix: ``assertEquals`` (a deprecated alias since Python 2.7, removed in
    Python 3.12) is replaced by the canonical ``assertEqual``.
    """

    def setUp(self):
        pass

    def test_chance(self):
        self.assertEqual(1 + 1 + 3 + 3 + 6, Game.chance(1, 1, 3, 3, 6))
        self.assertEqual(4 + 5 + 5 + 6 + 1, Game.chance(4, 5, 5, 6, 1))

    def test_yatzy(self):
        self.assertEqual(50, Game.yatzy(1, 1, 1, 1, 1))
        self.assertEqual(50, Game.yatzy(5, 5, 5, 5, 5))
        self.assertEqual(0, Game.yatzy(1, 1, 1, 2, 1))

    def test_ones(self):
        self.assertEqual(4 + 4, Game().fours(1, 1, 2, 4, 4))
        self.assertEqual(2 + 2, Game().twos(2, 3, 2, 5, 1))
        self.assertEqual(0, Game().ones(3, 3, 3, 4, 5))

    def test_pair(self):
        self.assertEqual(4 + 4, Game().pair(3, 3, 3, 4, 4))
        self.assertEqual(6 + 6, Game().pair(1, 1, 6, 2, 6))
        self.assertEqual(0, Game().pair(3, 3, 3, 4, 1))
        self.assertEqual(0, Game().pair(3, 3, 3, 3, 1))

    def test_two_pairs(self):
        self.assertEqual(1 + 1 + 3 + 3, Game().two_pairs(1, 1, 2, 3, 3))
        self.assertEqual(0, Game().two_pairs(1, 1, 2, 3, 4))
        self.assertEqual(0, Game().two_pairs(1, 1, 2, 2, 2))

    def test_three_of_a_kind(self):
        self.assertEqual(3 + 3 + 3, Game().three_kind(3, 3, 3, 4, 5))
        self.assertEqual(0, Game().three_kind(3, 3, 4, 5, 6))
        self.assertEqual(0, Game().three_kind(3, 3, 3, 3, 1))

    def test_four_of_a_kind(self):
        self.assertEqual(2 + 2 + 2 + 2, Game().four_kind(2, 2, 2, 2, 5))
        self.assertEqual(0, Game().four_kind(2, 2, 2, 5, 5))
        self.assertEqual(0, Game().four_kind(2, 2, 2, 2, 2))

    def test_straight(self):
        self.assertEqual(1 + 2 + 3 + 4 + 5, Game().straight(1, 2, 3, 4, 5))
        self.assertEqual(2 + 3 + 4 + 5 + 6, Game().straight(2, 3, 4, 5, 6))

    def test_full_house(self):
        self.assertEqual(1 + 1 + 2 + 2 + 2, Game().full_house(1, 1, 2, 2, 2))
if __name__ == "__main__":
    # Run the whole suite; uncomment below to target a single test.
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| [
"[email protected]"
] | |
4c5e15fd9822cbc0c71851e74db43be6f4bfc722 | 1626e16760c9c5b5dc9bd7c345871c716d5ffd99 | /Problems/2400_2499/2475_Number_of_Unequal_Triplets_in_Array/Project_Python3/Number_of_Unequal_Triplets_in_Array.py | 4242af6250379a0c5a304c43d89f7b0342a51492 | [] | no_license | NobuyukiInoue/LeetCode | 94ddb19e63cb8d0775cdc13f311fe90c87a1d718 | 3f0ffd519404165fd1a735441b212c801fd1ad1e | refs/heads/master | 2023-09-01T07:38:50.939942 | 2023-08-23T09:51:17 | 2023-08-23T09:51:17 | 158,100,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | # coding: utf-8
import collections
import os
import sys
import time
from typing import List, Dict, Tuple
class Solution:
def unequalTriplets(self, nums: List[int]) -> int:
# 35ms - 71ms
trips = pairs = 0
cnts = collections.Counter()
for i, num in enumerate(nums):
trips += pairs - cnts[num] * (i - cnts[num])
pairs += i - cnts[num]
cnts[num] += 1
return trips
def main():
    """Read test cases (one per non-empty line) from the file named on the
    command line and score each with Solution.unequalTriplets."""
    argv = sys.argv
    argc = len(argv)
    if argc < 2:
        print("Usage: python {0} <testdata.txt>".format(argv[0]))
        exit(0)
    if not os.path.exists(argv[1]):
        print("{0} not found...".format(argv[1]))
        exit(0)

    testDataFile = open(argv[1], "r")
    lines = testDataFile.readlines()

    for temp in lines:
        temp = temp.strip()
        if temp == "":
            continue
        print("args = {0}".format(temp))
        loop_main(temp)
    #   print("Hit Return to continue...")
    #   input()
def loop_main(temp):
    """Parse one "[1,2,3]"-style line into a list of ints, run the solver,
    and print the result together with the wall-clock time taken."""
    flds = temp.replace("[","").replace("]","").replace(", ",",").rstrip()
    nums = [int(n) for n in flds.split(",")]
    print("nums = {0}".format(nums))

    sl = Solution()
    time0 = time.time()
    result = sl.unequalTriplets(nums)
    time1 = time.time()

    print("result = {0:d}".format(result))
    print("Execute time ... : {0:f}[s]\n".format(time1 - time0))


if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
24a7e0e511a2d8d8023e5a267a26f01231db6504 | bc6492a9a30ac7228caad91643d58653b49ab9e3 | /sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py | 37a8673cd2de946f7185811aa561c209c9982994 | [] | no_license | cosmosZhou/sagemath | 2c54ea04868882340c7ef981b7f499fb205095c9 | 0608b946174e86182c6d35d126cd89d819d1d0b8 | refs/heads/master | 2023-01-06T07:31:37.546716 | 2020-11-12T06:39:22 | 2020-11-12T06:39:22 | 311,177,322 | 1 | 0 | null | 2020-11-12T06:09:11 | 2020-11-08T23:42:40 | Python | UTF-8 | Python | false | false | 2,288 | py | import sympy.physics.mechanics as me
import sympy as sm
import math as m
import numpy as np
g, lb, w, h = sm.symbols('g lb w h', real=True)
theta, phi, omega, alpha = me.dynamicsymbols('theta phi omega alpha')
thetad, phid, omegad, alphad = me.dynamicsymbols('theta phi omega alpha', 1)
thetad2, phid2 = me.dynamicsymbols('theta phi', 2)
frame_n = me.ReferenceFrame('n')
body_a_cm = me.Point('a_cm')
body_a_cm.set_vel(frame_n, 0)
body_a_f = me.ReferenceFrame('a_f')
body_a = me.RigidBody('a', body_a_cm, body_a_f, sm.symbols('m'), (me.outer(body_a_f.x,body_a_f.x),body_a_cm))
body_b_cm = me.Point('b_cm')
body_b_cm.set_vel(frame_n, 0)
body_b_f = me.ReferenceFrame('b_f')
body_b = me.RigidBody('b', body_b_cm, body_b_f, sm.symbols('m'), (me.outer(body_b_f.x,body_b_f.x),body_b_cm))
body_a_f.orient(frame_n, 'Axis', [theta, frame_n.y])
body_b_f.orient(body_a_f, 'Axis', [phi, body_a_f.z])
point_o = me.Point('o')
la = (lb-h/2)/2
body_a_cm.set_pos(point_o, la*body_a_f.z)
body_b_cm.set_pos(point_o, lb*body_a_f.z)
body_a_f.set_ang_vel(frame_n, omega*frame_n.y)
body_b_f.set_ang_vel(body_a_f, alpha*body_a_f.z)
point_o.set_vel(frame_n, 0)
body_a_cm.v2pt_theory(point_o,frame_n,body_a_f)
body_b_cm.v2pt_theory(point_o,frame_n,body_a_f)
ma = sm.symbols('ma')
body_a.mass = ma
mb = sm.symbols('mb')
body_b.mass = mb
iaxx = 1/12*ma*(2*la)**2
iayy = iaxx
iazz = 0
ibxx = 1/12*mb*h**2
ibyy = 1/12*mb*(w**2+h**2)
ibzz = 1/12*mb*w**2
body_a.inertia = (me.inertia(body_a_f, iaxx, iayy, iazz, 0, 0, 0), body_a_cm)
body_b.inertia = (me.inertia(body_b_f, ibxx, ibyy, ibzz, 0, 0, 0), body_b_cm)
force_a = body_a.mass*(g*frame_n.z)
force_b = body_b.mass*(g*frame_n.z)
kd_eqs = [thetad - omega, phid - alpha]
forceList = [(body_a.masscenter,body_a.mass*(g*frame_n.z)), (body_b.masscenter,body_b.mass*(g*frame_n.z))]
kane = me.KanesMethod(frame_n, q_ind=[theta,phi], u_ind=[omega, alpha], kd_eqs = kd_eqs)
fr, frstar = kane.kanes_equations([body_a, body_b], forceList)
zero = fr+frstar
from pydy.system import System
sys = System(kane, constants = {g:9.81, lb:0.2, w:0.2, h:0.1, ma:0.01, mb:0.1},
specifieds={},
initial_conditions={theta:np.deg2rad(90), phi:np.deg2rad(0.5), omega:0, alpha:0},
times = np.linspace(0.0, 10, 10/0.02))
y=sys.integrate()
| [
"[email protected]"
] | |
58c58c2aa06311710749227f4e0405344d093517 | 663d429e1f552ef958d37cfe4a0707354b544a9a | /rimi_linux_mysql/tcp_ip_socket/my_async/asyncsocket_chat/client1.py | 4a9a11ecfbb9b7904ff495bf8814706de43aa992 | [] | no_license | nie000/mylinuxlearn | 72a33024648fc4393442511c85d7c439e169a960 | 813ed75a0018446cd661001e8803f50880d09fff | refs/heads/main | 2023-06-20T07:46:11.842538 | 2021-07-15T13:46:43 | 2021-07-15T13:46:43 | 307,377,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | import socket
# Simple line-based TCP chat client: send what the user types, echo the reply.
ss_address = ("0.0.0.0",19528)
ss = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Debug markers printed before/after the blocking connect call.
print('123')
ss.connect(ss_address)
print('456')
while True:
    # Prompt text is Chinese: "please enter a message".
    data = input('请输入消息:')
    if not data:
        # Empty input ends the session.
        break
    try:
        ss.send(data.encode('utf8'))
        print(ss.recv(1024).decode('utf8'))
    except BrokenPipeError:
        # Server closed the connection ("connection already closed").
        print('连接已经关闭')
        break
ss.close()
| [
"[email protected]"
] | |
b9fb87d81b8ea6206160a6408edaca8fa28184b1 | 917974ea96ab36b7fa648dd57762b08e0650f459 | /MySQL/实例/MysqlOperate.py | 4fc8797c3170f610a8f5362ad0be20118f0f1282 | [] | no_license | zhoulongqa/pythonCode | 0d4957c65d2202f7551ba9ab96c06dd86e7b52d5 | 8ffd7503c3e50c5039c907fcf60a028e3829ec40 | refs/heads/master | 2021-09-08T22:23:47.892837 | 2018-03-12T12:20:10 | 2018-03-12T12:20:10 | 124,881,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | # encoding=utf-8
import MySQLdb
import random
def getDatabaseConnection():
    """Open a connection to the local MySQL server and return (conn, cursor).

    NOTE(review): credentials are hard-coded; acceptable for an exercise,
    but they should come from configuration in real code.
    """
    conn = MySQLdb.connect(
        host='localhost',
        port=3306,
        user='root',
        passwd='123123',
        charset='utf8')
    cur = conn.cursor()
    return conn, cur
def closeDatabase(conn, cur):
    """Release the cursor first, then the underlying connection."""
    cur.close()
    conn.close()
def createDatabase(data_base_name):
    """Create the named database if absent and print execute()'s result.

    NOTE(review): the name is interpolated straight into the SQL string, so
    only trusted identifiers must ever be passed in.
    """
    conn,cur = getDatabaseConnection()
    result = cur.execute(
        'create database if not exists %s default charset utf8 collate utf8_general_ci;' % data_base_name)
    print result
    closeDatabase(conn, cur)
def create_table(database_name, table_sql):
    """Execute a DDL statement against *database_name* and return the
    cursor's execute() result.

    Bug fix: the original placed closeDatabase() after `return result`, so
    it was unreachable and every call leaked a connection; try/finally now
    guarantees cleanup on both the success and error paths.
    """
    conn, cur = getDatabaseConnection()
    try:
        conn.select_db(database_name)
        result = cur.execute(table_sql)
        return result
    finally:
        closeDatabase(conn, cur)
def insert_data(database_name, data_sql):
    """Run a DML statement (e.g. INSERT) and print the affected-row count.

    Bug fixes: MySQLdb disables autocommit by default, so without an
    explicit conn.commit() the inserted rows were discarded when the
    connection closed; cleanup is now guaranteed via try/finally as well.
    (print(result) behaves identically on Python 2 and 3.)
    """
    conn, cur = getDatabaseConnection()
    try:
        conn.select_db(database_name)
        result = cur.execute(data_sql)
        conn.commit()
        print(result)
    finally:
        closeDatabase(conn, cur)
#createDatabase('wangzeliangDB')
# Bug fix: MySQL identifiers must be quoted with backticks, not single
# quotes, and the birthday column type was misspelled "DATA"; both made
# the original CREATE TABLE statement invalid SQL.
table_sql = '''CREATE TABLE user(`id` int(11) DEFAULT NULL, `name` VARCHAR(255) DEFAULT NULL, `passwd` VARCHAR(255) DEFAULT NULL, `birthday` DATE DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8;'''
data_sql = "insert into user values(1,'Tom','123','1990-01-01')"
create_table('wangzeliangDB', table_sql)
insert_data("wangzeliangDB", data_sql) | [
"[email protected]"
] | |
d15edac876db06faf9c9c07283a6d10c33c1f8f7 | 6d82c2f984855f0d430ebeb9d5d65adae8a6ed94 | /cdent/parser/pir/grammar.py | d3816445808fe4d76e78862d1a4b2149f3acea58 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | ingydotnet/cdent-py | bce12cfc8ffb10060ba3a67970af3649d01ca37c | 013e967f1436269965e166a91e16bcde3995b765 | refs/heads/master | 2023-05-29T19:11:25.698386 | 2011-09-21T15:15:08 | 2011-09-21T15:15:08 | 139,786 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | """
C'Dent Pir parser grammar module.
"""
from cdent.grammar import *
class Grammar():
    """Rule table for the Pir (Parrot IR) flavour of the C'Dent parser.

    Each key maps a grammar-production name to its matcher object
    (Re / All / Any / Rule / Indent / Undent from cdent.grammar).
    """
    def __init__(self):
        # Install the productions directly into the instance dict so each
        # rule is addressable as an attribute (e.g. self.Module).
        self.__dict__.update(
            { 'BlankLine': Re({'_': '[\\ \\t]*\\r?\\n'}),
              'Class': All({'_': [Rule({'_': 'ClassStart'}), Rule({'_': 'ClassBody'}), Rule({'_': 'ClassEnd'})]}),
              'ClassBody': All({'_': [Indent({}), Rule({'x': '*', '_': 'Comment'}), Rule({'_': 'Method'}), Any({'x': '*', '_': [Rule({'_': 'Method'}), Rule({'_': 'Comment'})]})]}),
              'ClassEnd': Re({'_': ''}),
              'ClassStart': Re({'_': '.namespace[\\ \\t]+\\["(\\w+)"\\]\\r?\\n'}),
              'Comment': Any({'_': [Rule({'_': 'LineComment'}), Rule({'_': 'BlankLine'})]}),
              'DocComment': All({'_': [Rule({'_': 'DocCommentBegin'}), All({'x': '*', '_': [Rule({'!': True, '_': 'DocCommentEnd'}), Rule({'_': 'DocCommentLine'})]}), Rule({'_': 'DocCommentEnd'})]}),
              'DocCommentBegin': Re({'_': '#{3}\\r?\\n'}),
              'DocCommentEnd': Re({'_': '#{3}\\r?\\n'}),
              'DocCommentLine': Re({'_': '#[\\ \\t]?(.*\\r?\\n)'}),
              'Id': Re({'_': '\\w+'}),
              'IncludeCDent': Re({'_': 'use CDent;'}),
              'Line': Re({'_': '.*\\r?\\n'}),
              'LineComment': Re({'_': '#(.*\\r?\\n)'}),
              'Method': All({'_': [Rule({'_': 'MethodStart'}), Rule({'_': 'MethodBody'}), Rule({'_': 'MethodEnd'})]}),
              'MethodBody': All({'_': [Indent({}), Rule({'_': 'Statement'}), Any({'x': '*', '_': [Rule({'_': 'Statement'}), Rule({'_': 'Comment'})]}), Undent({})]}),
              'MethodEnd': Re({'_': '.end\\r?\\n'}),
              'MethodStart': Re({'_': '.sub[\\ \\t]+(\\w+)[\\ \\t]+:method\\r?\\n'}),
              'Module': All({'_': [Rule({'_': 'ModuleStart'}), Rule({'x': '?', '_': 'DocComment'}), Rule({'x': '*', '_': 'Comment'}), Rule({'x': '?', '_': 'IncludeCDent'}), Rule({'x': '*', '_': 'Comment'}), Rule({'_': 'Class'}), Any({'x': '*', '_': [Rule({'_': 'Class'}), Rule({'_': 'Comment'})]}), Rule({'_': 'ModuleEnd'}), Rule({'x': '*', '_': 'Comment'})]}),
              'ModuleEnd': Re({'_': ''}),
              'ModuleStart': Re({'_': ''}),
              'PrintLn': Re({'_': 'say[\\ \\t]+(.+)\\r?\\n'}),
              'Statement': Any({'_': [Rule({'_': 'PrintLn'}), Rule({'_': 'Comment'})]}),
              'line_comment_start': Re({'_': '#'})}
            )
| [
"[email protected]"
] | |
327f9765f5dd9fd7ec5ddb1747f3de2bffe48a72 | 5acc20092ee93935594a7e0522924245a43e5531 | /support_vector_machines/plot_oneclass_svm.py | b3dd6215ed3a9101ee39baa85ae115e8380814cf | [] | no_license | shengchaohua/sklearn-examples | aae2332c4382a57a70c1887777c125e6dc4579d6 | 1dac6a9b5e703185a8da1df7c724022fbd56a9e4 | refs/heads/master | 2020-05-05T01:19:20.037746 | 2019-10-18T08:55:01 | 2019-10-18T08:55:01 | 179,599,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,014 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
# One-class SVM novelty-detection demo: train on two Gaussian blobs, then
# score regular and abnormal new observations and plot the learned frontier.
# 500x500 grid over the plotting region, used to evaluate the decision function.
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# Fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# Count misclassifications: predict() returns +1 for inliers, -1 for outliers.
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# Plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s, edgecolors='k')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s,
                 edgecolors='k')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s,
                edgecolors='k')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
           ["learned frontier", "training observations",
            "new regular observations", "new abnormal observations"],
           loc="upper left",
           prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
    "error train: %d/200 ; errors novel regular: %d/40 ; "
    "errors novel abnormal: %d/40"
    % (n_error_train, n_error_test, n_error_outliers))
plt.show()
| [
"[email protected]"
] | |
3fa18fbb6c6c984c8016aa0330fccb80274eeeb2 | e4414bd8152e52855db7ab9065ae12b7329143e0 | /python/src/two_more_random.py | b87bee878dbdfeb7ad6ff81d257bf7e780ba71dd | [] | no_license | catalinc/programmingpraxis-solutions | 39cb847877ec46d2fb85740791c24889ab5654a8 | c0b13906aa76ffac705bf108db138fb9a38bc16a | refs/heads/master | 2021-03-27T16:46:47.781839 | 2017-09-09T15:17:38 | 2017-09-09T15:17:38 | 53,532,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # A solution for http://programmingpraxis.com/2012/08/21/two-more-random-exercises/
import math
def rand_middle_square(seed):
    """Von Neumann middle-square generator: yield the seed, then repeatedly
    square the state and keep its middle digits.

    Bug fix: the original used true division (/), which returns floats on
    Python 3 and breaks the digit arithmetic; floor division (//) keeps the
    original Python 2 behaviour on both interpreter lines.
    """
    n = seed
    # Number of digits kept in the state (log10 rounded, as in the original).
    seed_len = int(round(math.log(seed, 10)))
    while True:
        yield n
        n = (n * n) // (10 ** (seed_len // 2)) % (10 ** seed_len)
def randu(seed):
    """IBM's infamous RANDU generator: x[k+1] = 65539 * x[k] mod 2**31.

    Yields the seed itself first, then successive states.
    """
    state = seed
    while True:
        yield state
        # Masking with 2**31 - 1 is identical to % 2147483648 for Python ints.
        state = (state * 65539) & 0x7FFFFFFF
def random(count, seed, rand_fn):
    """Return the first *count* values produced by rand_fn(seed).

    Bug fix: xrange() and generator.next() exist only on Python 2; range()
    and the built-in next() behave identically there and also run on
    Python 3.
    """
    random_gen = rand_fn(seed)
    return [next(random_gen) for _ in range(count)]
# Demo: first five values of each generator, seeded as in the exercise text.
print(random(5, 675248, rand_middle_square))
print(random(5, 7, randu))
| [
"[email protected]"
] | |
1a9658d9fae0218278448f9af37f2b5c5e6f3593 | b9696a277966d85548ebf23c77d24554dd98b1c1 | /LasAndClf-dev/get_data_packages/collectdata2bc.py | 9ff8239bd1826c85a88010e5fb370669aed10557 | [] | no_license | hsulab/multiVASP | f1d277b015f97532588f4db21ce14bae68dafed9 | e05bf8c03ff1653ad2621fdd61b8a706138dc37b | refs/heads/master | 2020-03-07T09:12:32.199150 | 2019-10-22T14:30:18 | 2019-10-22T14:30:18 | 127,394,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,986 | py | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import os
import re
import time
import numpy as np
import pandas as pd
"""
"""
class GeoData():
    """Loader for geometry-feature csv files, one file per feature set.

    The csv file is located by matching `name` against the file names in
    the data directory (the last matching file wins).
    """
    def __init__(self, name):
        self.name = name
    # Path Settings: default data directory, overridable via set_path().
    __dirpath = os.path.join(os.path.expanduser('~'), 'Desktop/CH4_DS')
    def set_path(self, path):
        """Set data path."""
        self.__dirpath = path
    def get_path(self):
        """Return the data path.

        Bug fix: the original evaluated self.__dirpath without returning
        it, so the getter always yielded None.
        """
        return self.__dirpath
    # Get Csv Name
    def __csvname(self):
        """Return the last csv file name matching self.name.

        NOTE(review): raises UnboundLocalError when nothing matches —
        confirm that a matching file is always present.
        """
        for file_name in os.listdir(self.__dirpath):
            if re.match(self.name, file_name):
                csv_name = file_name
        return csv_name
    # Get df col=[name, feas]
    def df(self, numbers=-1):
        """Load the csv and return its 'name' column plus the first
        `numbers` feature columns (all of them when numbers == -1).

        NOTE(review): the first three csv columns are assumed to be
        non-feature metadata — confirm against the data files.
        """
        csv = os.path.join(self.__dirpath, self.__csvname())
        df = pd.read_csv(csv, index_col=0)
        fea_numbers = len(df.columns) - 3
        #print(self.name+' has '+str(fea_numbers)+' features.')
        if numbers == -1:
            numbers = fea_numbers
        feas = []
        for i in range(3, numbers+3):
            feas.append(df.columns[i])
        return df.loc[:,tuple(['name']+feas)]
class EneData(GeoData):
    'Energy Data'
    def allE(self):
        """Classify each sample's mechanism and pick its barrier energy.

        For every row, compare the 'E_ts' and 'E_tsra' columns (missing
        values are stored as the literal string 'np.nan'):
          * both missing     -> mtype 'np.nan', no energy
          * only one present -> that mechanism wins
          * both present     -> the lower-energy mechanism wins
        Adds 'mtype' and 'mE' columns and returns the dataframe.
        """
        df = self.df()
        mtype = [] # mechanism type
        mE = []
        for i in range(df.shape[0]):
            if df.loc[i, 'E_ts'] == 'np.nan' and df.loc[i, 'E_tsra'] == 'np.nan':
                mtype.append('np.nan')
                mE.append('np.nan')
            elif df.loc[i, 'E_ts'] == 'np.nan' and df.loc[i, 'E_tsra'] != 'np.nan':
                mtype.append('tsra')
                mE.append(df.loc[i, 'E_tsra'])
            elif df.loc[i, 'E_ts'] != 'np.nan' and df.loc[i, 'E_tsra'] == 'np.nan':
                mtype.append('ts')
                mE.append(df.loc[i, 'E_ts'])
            elif df.loc[i, 'E_ts'] > df.loc[i, 'E_tsra']:
                mtype.append('tsra')
                mE.append(df.loc[i, 'E_tsra'])
            else:
                mtype.append('ts')
                mE.append(df.loc[i, 'E_ts'])
        df.loc[:, 'mtype'] = mtype
        df.loc[:, 'mE'] = mE
        return df
def get_data():
    """
    Description:
        Get Geo DataFrame.
        descriptors: distance 45, angles 360, dihedrals 630.
    """
    print('Load Data...')
    # Geometry features for the bare surface and the two adsorbed states.
    suf = GeoData('suf').df()
    hab3 = GeoData('Hab3').df()
    ch3ab = GeoData('CH3ab').df()
    #
    # Delta features: adsorbed-state geometry minus bare-surface geometry.
    # NOTE(review): str.strip('suf') removes any of the characters s/u/f
    # from both ends, not the literal suffix "suf" — verify column names.
    delta_df = pd.DataFrame()
    delta_df.loc[:, 'name'] = suf.loc[:, 'name']
    cols = suf.columns[1:]
    for col in cols:
        t = col.strip('suf')
        delta_df.loc[:, t+'hab3'] = hab3.loc[:, t+'Hab3'] - suf.loc[:, t+'suf']
    for col in cols:
        t = col.strip('suf')
        delta_df.loc[:, t+'ch3ab'] = ch3ab.loc[:, t+'CH3ab'] - suf.loc[:, t+'suf']
    'Merge geofeas'
    print('This set has ', delta_df.shape[0], 'samples.')
    print('This set has ', delta_df.shape[1]-1, 'features.')
    'Get numbers of geofeas'
    print('Merge Data...')
    E_feas = ['name', 'mtype', 'E_ts', 'E_tsra', 'mE', 'E_Hab3', 'E_CH3ab']
    fE = EneData('fE').allE().loc[:, E_feas] # reaction Energy
    e_numbers = fE.shape[1]
    di = pd.merge(fE, delta_df, on='name')
    # Drop samples with no classified mechanism, then known-bad samples.
    new_di = di.loc[di.loc[:,'mtype']!='np.nan', :]
    # !!!
    new_di = new_di.loc[di.loc[:,'name']!='pureMoO2', :] # CH3ab wrong
    new_di = new_di.loc[di.loc[:,'name']!='pureMnO2', :] # CH3ab wrong
    new_di = new_di.loc[di.loc[:,'name']!='dopCrO2_Ru', :] # CH3ab wrong
    print('Energy and Geometry set has ', new_di.shape[0], 'samples.')
    print('Energy and Geometry set has ', new_di.shape[1]-5, 'features.')
    # Save data -> ./CH4_DataSet.csv
    merged_data_csv = './CH4_neo.csv'
    print('Save data -> %s' %merged_data_csv)
    new_di.to_csv(merged_data_csv)
    return new_di # [name, mtype, mE, geo ... feas]
if __name__ == '__main__':
'geoFeatures Total 1035'
get_data()
| [
"[email protected]"
] | |
6b93ecdbb92e6d5706872b8722d49a411dcbc403 | fdd6c6a1b8e6e7e8cd267de97a1b435777342e1b | /tests/test_altdphi.py | 3a840650b023a7e240e31fe5fb070f9cd216cce3 | [
"BSD-3-Clause"
] | permissive | TaiSakuma/altdphi | bccec475432dec5aebafda4e47d12fcc5cf048d6 | ed74418fe6e0e4b08582d80093102795276d17d6 | refs/heads/master | 2021-03-16T08:45:10.249447 | 2019-05-14T16:44:40 | 2019-05-14T16:44:40 | 118,086,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | # Tai Sakuma <[email protected]>
import numpy as np
import pytest
from altdphi import AltDphi
from .testing import assert_altdphi_equal
from .expected import *
##__________________________________________________________________||
# Parametrised over all sample events; each param is a triple of
# (event dict, expected AltDphi, expected AltDphi computed with MET).
@pytest.fixture(
    params=[
        (event_nojet, altdphi_nojet, altdphi_met_nojet),
        (event_monojet, altdphi_monojet, altdphi_met_monojet),
        (event_two_jets, altdphi_two_jets, altdphi_met_two_jets),
        (event_three_jets, altdphi_three_jets, altdphi_met_three_jets),
        (event_four_jets, altdphi_four_jets, altdphi_met_four_jets),
        (event_twelve_jets, altdphi_twelve_jets, altdphi_met_twelve_jets),
    ],
    ids=('nojet', 'monojet', 'two_jets', 'three_jets', 'four_jets', 'twelve_jets')
)
def event_altdphi(request):
    """Return one (event, expected_altdphi, expected_altdphi_met) triple."""
    return request.param
def test_altdphi(event_altdphi):
    """AltDphi built from jet pt/phi alone matches the expected values."""
    event = event_altdphi[0]
    expected_altdphi = event_altdphi[1]
    pt = event['jet_pt']
    phi = event['jet_phi']
    actual_altdphi = AltDphi(pt=pt, phi=phi)
    assert_altdphi_equal(expected_altdphi, actual_altdphi)
def test_altdphi_met(event_altdphi):
    """AltDphi built with an explicit MET (mht/mht_phi) matches expectations."""
    event = event_altdphi[0]
    expected_altdphi = event_altdphi[2]
    pt = event['jet_pt']
    phi = event['jet_phi']
    met = event['met']
    met_phi = event['met_phi']
    actual_altdphi = AltDphi(pt=pt, phi=phi, mht=met, mht_phi=met_phi)
    assert_altdphi_equal(expected_altdphi, actual_altdphi)
##__________________________________________________________________||
def test_altdphi_monojet_is_minus_mht():
    """Without explicit MET, a monojet's MHT equals the jet pt, so
    f == 1 and cos(dphi) == -1 exactly."""
    event = event_monojet
    pt = event['jet_pt']
    phi = event['jet_phi']
    altdphi = AltDphi(pt=pt, phi=phi)
    assert pt[0] == altdphi.mht
    assert [1] == altdphi.f
    assert [-1] == altdphi.cos_dphi
def test_altdphi_monojet_is_not_minus_mht():
    """With an explicit MET that differs from the jet pt, none of the
    exact monojet identities hold."""
    event = event_monojet
    pt = event['jet_pt']
    phi = event['jet_phi']
    mht = event['met']
    mht_phi = event['met_phi']
    altdphi = AltDphi(pt=pt, phi=phi, mht=mht, mht_phi=mht_phi)
    assert pt[0] != altdphi.mht
    assert [1] != altdphi.f
    assert [-1] != altdphi.cos_dphi
##__________________________________________________________________||
| [
"[email protected]"
] | |
728158a4d9026a97e17a89c008935c78bba93cc3 | 2f6817fc8f6ddb48f5f88c913d8e40b672fc3dbf | /MLP/lec13-4[Kmeans].py | 84ab79f09472a0230ce9c1721fc34ce47e22cf64 | [] | no_license | cutz-j/TodayILearned | 320b5774de68a0f4f68fda28a6a8b980097d6ada | 429b24e063283a0d752ccdfbff455abd30ba3859 | refs/heads/master | 2020-03-23T17:34:51.389065 | 2018-11-24T08:49:41 | 2018-11-24T08:49:41 | 141,865,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | import pandas as pd
from sklearn import datasets
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Cluster the iris data on the two sepal features with K-means (lecture demo).
iris = datasets.load_iris()
labels = pd.DataFrame(iris.target)
labels.columns = ['labels']
data = pd.DataFrame(iris.data)
data.columns = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width']
data = pd.concat([data, labels], axis=1)
feature = data[['Sepal_length', 'Sepal_width']]
# Standardise, then cluster into 5 groups.
model = KMeans(n_clusters=5, algorithm='auto')
scaler = StandardScaler()
pipeline = make_pipeline(scaler, model)
pipeline.fit(feature)
predict = pd.DataFrame(pipeline.predict(feature))
# Elbow-method curve: inertia for k = 1..9.
ks = range(1,10)
inertias = []
for k in ks:
    # Bug fix: the keyword is n_clusters; n_cluster raised a TypeError.
    model = KMeans(n_clusters=k)
    model.fit(feature)
    inertias.append(model.inertia_)
predict.columns = ['predict']
r = pd.concat([feature, predict], axis=1)
plt.scatter(r['Sepal_length'], r['Sepal_width'], c=r['predict'], alpha=0.5)
# NOTE(review): `model` here is the last elbow-loop model (k=9), not the
# 5-cluster pipeline model — confirm which centers were meant to be shown.
centers = pd.DataFrame(model.cluster_centers_, columns=['Sepal_length', 'Sepal_width'])
#center_x = centers['Sepal_length']
#center_y = centers['Sepal_width']
#plt.scatter(center_x, center_y, s=50, marker='D', c='r')
#plt.show() | [
"[email protected]"
] | |
c3e43afbae66f6aa4658cc2e059a94f5e45187c6 | b5d738624d7016f7e10796485624c567099374ab | /starthinker/util/dcm/schema/Activity_Metrics.py | cfddd432e314017727e16532df1791ab7115aa76 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | dvandra/starthinker | 212d8166752c36fbe6a5e0988fb5ad598f35c4a6 | 07a8c1f8bf3c7493b1833d54ca0acc9305a04bc9 | refs/heads/master | 2020-06-14T05:19:08.348496 | 2019-07-02T17:54:06 | 2019-07-02T17:54:06 | 194,915,001 | 1 | 0 | Apache-2.0 | 2019-07-02T18:25:23 | 2019-07-02T18:25:23 | null | UTF-8 | Python | false | false | 3,495 | py | ###########################################################################
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# BigQuery schema for the Campaign Manager (DCM) "Activity Metrics" report:
# one field entry per metric column, all nullable. The closing bracket of
# this list literal follows immediately after the last entry.
Activity_Metrics_Schema = [
  { "name":"Click_Through_Conversions", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Click_Through_Revenue", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"View_Through_Conversions", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"View_Through_Revenue", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Click_Through_Conversions_Cross_Environment", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"Click_Through_Revenue_Cross_Environment", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Click_Through_Conversion_Events_Cross_Environment", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"Total_Conversions_Cross_Environment", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"Total_Revenue_Cross_Environment", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Total_Conversion_Events_Cross_Environment", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"View_Through_Conversions_Cross_Environment", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"View_Through_Revenue_Cross_Environment", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"View_Through_Conversion_Events_Cross_Environment", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"Dynamic_Element_Click_Through_Conversions", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"Dynamic_Element_Total_Conversions", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"Dynamic_Element_View_Through_Conversions", "type":"INTEGER", "mode":"NULLABLE" },
  { "name":"Natural_Search_Actions", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Natural_Search_Revenue", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Natural_Search_Transactions", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Floodlight_Paid_Search_Action_Conversion_Percentage", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Paid_Search_Actions", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Floodlight_Paid_Search_Average_Cost_Per_Action", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Floodlight_Paid_Search_Average_Cost_Per_Transaction", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Floodlight_Paid_Search_Average_Dcm_Transaction_Amount", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Paid_Search_Revenue", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Floodlight_Paid_Search_Spend_Per_Transaction_Revenue", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Floodlight_Paid_Search_Transaction_Conversion_Percentage", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Floodlight_Paid_Search_Transaction_Revenue_Per_Spend", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Paid_Search_Transactions", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Total_Conversions", "type":"FLOAT", "mode":"NULLABLE" },
  { "name":"Total_Revenue", "type":"FLOAT", "mode":"NULLABLE" }
"[email protected]"
] | |
8147ee54973388356c60c895c778940a1eee9e84 | 8d014c5513a0eeca086010b018b67336f8d042e0 | /wicam_vlc.py | 480b078447c514a89f8b8c568d94727f18331028 | [] | no_license | rkuo2000/cv2 | 26ce0a06b4040eabb82319ec44cab5c3639b9495 | 16e64e7092d6654ea470e469d6b15f308ecd1788 | refs/heads/master | 2022-10-12T00:11:35.964818 | 2022-09-30T06:50:35 | 2022-09-30T06:50:35 | 108,848,948 | 5 | 29 | null | 2022-09-29T11:01:48 | 2017-10-30T12:38:58 | Python | UTF-8 | Python | false | false | 457 | py | # Install VLC Player on PC
# Add Environtment System Variables: VLC_PLUGIN_PATH = C:\Program Files\VideoLAN\VLC\plugins
# pip install python-vlc
# WiFi connected to WiCam module (streaming video)
import cv2
import vlc
#player=vlc.MediaPlayer('rtsp://192.168.100.1/cam1/h264')
# Open the WiCam module's MPEG-4 RTSP stream via VLC.
player=vlc.MediaPlayer('rtsp://192.168.100.1/cam1/mpeg4')
# This loop has no break, so it never terminates normally.
while 1:
    # NOTE(review): MediaPlayer.play() returns a status code, not an image
    # frame, so passing it to cv2.imshow likely fails — confirm intended API.
    frame = player.play()
    cv2.imshow('VIDEO',frame)
    cv2.waitKey(1)
cv2.destroyAllWindows() | [
"[email protected]"
] | |
2d50a33f7a6f96a094b2b5a8c3082d850f8c3b9a | dea8cfa596d52d5db0e28ac43504e7212b43081b | /python/AtCoder Beginner Contest 123/Five Dishes .py | 5b3b87bc101c5841242a539782cdaf0a6b8925b9 | [] | no_license | Yuta123456/AtCoder | 9871a44f12a8fca87b0e2863a999b716128de1ac | ca04422699719563e311f7d973459ba1dc238c2c | refs/heads/master | 2023-01-04T22:33:54.120454 | 2020-11-04T05:20:37 | 2020-11-04T05:20:37 | 286,409,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | def ceil(x):
k = x % 10
if k == 0:
return x
else:
return x + (10 - k)
# ABC123 C "Five Dishes": each dish takes d[i] minutes; orders may only be
# placed at multiples of 10, so all but the last dish cost their time
# rounded up to a multiple of 10.
d = []
# Read the five dish times from standard input.
for i in range(5):
    d.append(int(input()))
d_min = []
# NOTE(review): `min` and `sum` shadow the built-ins below; kept as-is.
min = 124
sum = 0
index = -1
# Last digit of each dish time.
for i in range(5):
    d_min.append((d[i]) % 10)
# Choose the dish with the smallest non-zero last digit to order last.
for i in range(5):
    if d_min[i] != 0:
        if min > d_min[i]:
            min = d_min[i]
            index = i
if index == -1:
    index = 0
# Every other dish contributes its time rounded up to a multiple of 10.
for i in range(5):
    if i != index:
        sum = sum + ceil(d[i])
sum += d[index]
print(sum)
| [
"[email protected]"
] | |
40b35aefa6aa53d7c9e97137d474309dfdb68a8e | 0d0cf0165ca108e8d94056c2bae5ad07fe9f9377 | /15_Feature_Engineering_for_Machine_Learning_in_Python/2_Dealing_with_Messy_Data/howSparseIsMyData.py | 6c17397750627c6381fd7a7979223548ea23969e | [] | no_license | MACHEIKH/Datacamp_Machine_Learning_For_Everyone | 550ec4038ebdb69993e16fe22d5136f00101b692 | 9fe8947f490da221430e6dccce6e2165a42470f3 | refs/heads/main | 2023-01-22T06:26:15.996504 | 2020-11-24T11:21:53 | 2020-11-24T11:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # How sparse is my data?
# Most data sets contain missing values, often represented as NaN (Not a Number). If you are working with Pandas you can easily check how many missing values exist in each column.
# Let's find out how many of the developers taking the survey chose to enter their age (found in the Age column of so_survey_df) and their gender (Gender column of so_survey_df).
# Instructions 1/2
# 50 XP
# Subset the DataFrame to only include the 'Age' and 'Gender' columns.
# Print the number of non-missing values in both columns.
# NOTE(review): so_survey_df is provided by the DataCamp exercise
# environment; it is not defined in this file.
# Subset the DataFrame
sub_df = so_survey_df[['Age', 'Gender']]
# Print the number of non-missing values
print(sub_df.notnull().sum())
| [
"[email protected]"
] | |
2093ce0cb85111f3f214151ed4bcb78b1d2e34fc | ff4fe07752b61aa6404f85a8b4752e21e8a5bac8 | /challenge-209/eric-cheung/python/ch-2.py | 624ac7029fd2589ae8c5e87fe90970b576910183 | [] | no_license | choroba/perlweeklychallenge-club | 7c7127b3380664ca829158f2b6161c2f0153dfd9 | 2b2c6ec6ece04737ba9a572109d5e7072fdaa14a | refs/heads/master | 2023-08-10T08:11:40.142292 | 2023-08-06T20:44:13 | 2023-08-06T20:44:13 | 189,776,839 | 0 | 1 | null | 2019-06-01T20:56:32 | 2019-06-01T20:56:32 | null | UTF-8 | Python | false | false | 1,044 | py |
## arrAccount = [["A", "[email protected]", "[email protected]"], ["B", "[email protected]"], ["A", "[email protected]", "[email protected]"]] ## Example 1
arrAccount = [["A", "[email protected]", "[email protected]"], ["B", "[email protected]"], ["A", "[email protected]"], ["B", "[email protected]", "[email protected]"]] ## Example 2
arrUser = [arrAccount[0][0]]
arrEmail = [arrAccount[0][1:]]
arrFinal = []
for nIndx in range(1, len(arrAccount)):
if arrAccount[nIndx][0] not in arrUser:
arrUser.append(arrAccount[nIndx][0])
arrEmail.append(arrAccount[nIndx][1:])
else:
nFindIndx = arrUser.index(arrAccount[nIndx][0])
if len(list(set(arrEmail[nFindIndx]) & set(arrAccount[nIndx][1:]))) == 0:
arrUser.append(arrAccount[nIndx][0])
arrEmail.append(arrAccount[nIndx][1:])
else:
arrEmail[nFindIndx] = sorted(list(set(arrEmail[nFindIndx] + arrAccount[nIndx][1:])))
## print (arrUser)
## print (arrEmail)
for nIndx in range(0, len(arrUser)):
arrFinal.append([arrUser[nIndx], str(arrEmail[nIndx][:])[1:-1]])
print (arrFinal)
| [
"[email protected]"
] | |
5e3155e560a1c4c9932aad5f2150648b1da46f76 | fd7d7e1410874d18823bbe3c0f3c521cb54e079c | /news/migrations/0008_auto_20191002_1652.py | 50dcb8502c426938f35aa56cdf27f8b4495221b8 | [
"MIT"
] | permissive | alex-muliande/tribune | 9c1728311e42b16cf90e3b6e94b64a2b1ed3c8be | 86316dd4b20a76320b4b20b86266f89aac02a326 | refs/heads/master | 2023-08-03T13:20:47.852749 | 2019-10-03T09:25:46 | 2019-10-03T09:25:46 | 212,532,363 | 1 | 0 | MIT | 2021-09-08T01:19:17 | 2019-10-03T08:37:00 | Python | UTF-8 | Python | false | false | 466 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-02 13:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: store Article.article_image uploads under
    the 'articles/' media subdirectory."""
    dependencies = [
        ('news', '0007_article_article_image'),
    ]
    operations = [
        migrations.AlterField(
            model_name='article',
            name='article_image',
            field=models.ImageField(upload_to='articles/'),
        ),
    ]
| [
"[email protected]"
] | |
e4da1c60a852bd610107e481b15b04c840883e61 | 306a4c0c7ed32e879f76e6c101da70c46679f6bc | /copying_files_folders.py | 2ff064ba2279ee561714d6f97429272228f18007 | [] | no_license | ksoh512/automatetheboringstuff | 3552f803d73644862e2e31d307b50aff82b6a839 | 0d9ee8de7927dbe0e0f08dbfb73867ffd9bf563c | refs/heads/master | 2021-01-20T03:02:14.554780 | 2017-08-24T22:52:37 | 2017-08-24T22:52:37 | 101,343,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | import shutil, os
# Work from the drive root so the relative source paths resolve.
os.chdir('C:\\')
''' COPY FILES '''
# shutil.copy(src, dst_dir) copies a single file into the target folder.
shutil.copy('C:\\spam.txt', 'C:\\Users\\koh\\Documents\\codes\\automattheboringstuff')
shutil.copy('C:\\eggs.txt', 'C:\\Users\\koh\\Documents\\codes\\automattheboringstuff')
'''COPY FOLDERS AND FILES CONTAINED IN IT'''
# NOTE(review): shutil.copytree requires that the destination directory not
# already exist, yet the same destination was just written to above — verify.
shutil.copytree('C:\\delicious', 'C:\\Users\\koh\\Documents\\codes\\automattheboringstuff')
| [
"[email protected]"
] | |
c0642e90ddb142bbe67af2cbb81148287054d3d3 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/530bd27ed2c4c8e3f6a44b332569c3f73dfcb332-<test_np_mixed_precision_binary_funcs>-fix.py | 8e9d3b67eb8f1b2c486e7784f60da806ef689c24 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | @with_seed()
@use_np
def test_np_mixed_precision_binary_funcs():
    """Check mxnet.np add/subtract/multiply against NumPy across mixed
    integer/float dtype pairs, shapes (incl. broadcasting and size-0),
    and both imperative and hybridized execution."""
    def check_mixed_precision_binary_func(func, low, high, lshape, rshape, ltype, rtype):
        # Wraps the named operator so it can run both imperatively and
        # hybridized (graph mode).
        class TestMixedBinary(HybridBlock):
            def __init__(self, func):
                super(TestMixedBinary, self).__init__()
                self._func = func
            def hybrid_forward(self, F, a, b, *args, **kwargs):
                return getattr(F.np, self._func)(a, b)
        np_func = getattr(_np, func)
        mx_func = TestMixedBinary(func)
        np_test_x1 = _np.random.uniform(low, high, lshape).astype(ltype)
        np_test_x2 = _np.random.uniform(low, high, rshape).astype(rtype)
        mx_test_x1 = mx.numpy.array(np_test_x1, dtype=ltype)
        mx_test_x2 = mx.numpy.array(np_test_x2, dtype=rtype)
        # Looser tolerances whenever float16 is involved.
        rtol = (0.01 if ((ltype is np.float16) or (rtype is np.float16)) else 0.001)
        atol = (0.001 if ((ltype is np.float16) or (rtype is np.float16)) else 1e-05)
        for hybridize in [True, False]:
            if hybridize:
                mx_func.hybridize()
            np_out = np_func(np_test_x1, np_test_x2)
            with mx.autograd.record():
                y = mx_func(mx_test_x1, mx_test_x2)
            assert (y.shape == np_out.shape)
            assert_almost_equal(y.asnumpy(), np_out.astype(y.dtype), rtol=rtol, atol=atol, use_broadcast=False, equal_nan=True)
        # Also exercise the free-function form mx.np.<func>.
        np_out = getattr(_np, func)(np_test_x1, np_test_x2)
        mx_out = getattr(mx.np, func)(mx_test_x1, mx_test_x2)
        assert (mx_out.shape == np_out.shape)
        assert_almost_equal(mx_out.asnumpy(), np_out.astype(mx_out.dtype), rtol=rtol, atol=atol, use_broadcast=False, equal_nan=True)
    funcs = {
        'add': ((- 1.0), 1.0),
        'subtract': ((- 1.0), 1.0),
        'multiply': ((- 1.0), 1.0),
    }
    shape_pairs = [((3, 2), (3, 2)), ((3, 2), (3, 1)), ((3, 1), (3, 0)), ((0, 2), (1, 2)), ((2, 3, 4), (3, 1)), ((2, 3), ()), ((), (2, 3))]
    itypes = [np.bool, np.int8, np.int32, np.int64]
    ftypes = [np.float16, np.float32, np.float64]
    for (func, func_data) in funcs.items():
        (low, high) = func_data
        for (lshape, rshape) in shape_pairs:
            for (type1, type2) in itertools.product(itypes, ftypes):
                check_mixed_precision_binary_func(func, low, high, lshape, rshape, type1, type2)
                check_mixed_precision_binary_func(func, low, high, lshape, rshape, type2, type1)
            for (type1, type2) in itertools.product(ftypes, ftypes):
                if (type1 == type2):
                    continue
check_mixed_precision_binary_func(func, low, high, lshape, rshape, type1, type2) | [
"[email protected]"
] | |
34c27860cdf81fee0a1067a3153e527e6bec3bf2 | 126970b5a7aef7def577922f9ed4bc0889ec5804 | /products/views.py | 6d46af5e0d4df6315060a0e34a7600163f3b5171 | [] | no_license | HeshamSayed/ElectroBekia | 6544955d1449ce03e6fd432bfdff05422a9f92ba | 42fab2ed3dc43f6f3e3e75cc17a7a26cb747d385 | refs/heads/master | 2022-12-13T21:47:10.673963 | 2019-06-18T16:03:04 | 2019-06-18T16:03:04 | 186,132,437 | 0 | 3 | null | 2022-12-08T05:08:00 | 2019-05-11T12:50:58 | CSS | UTF-8 | Python | false | false | 623 | py | from django.shortcuts import render, get_object_or_404
from .models import *
from cart.forms import CartAddProductForm
def product_list(request):
    """Render the catalogue page listing every category and product."""
    context = {
        'categories': Category.objects.all(),
        'products': Product.objects.all(),
    }
    return render(request, 'products/list.html', context)
def product_detail(request, pk):
    """Render a single product page (404 when absent) with its add-to-cart form."""
    context = {
        'product': get_object_or_404(Product, pk=pk),
        'cart_product_form': CartAddProductForm(),
    }
    return render(request, 'products/detail.html', context)
| [
"[email protected]"
] | |
360817c27d99ca21241781e95372efb461f4a4b0 | dba16143d8fa6aa73ca1d4df7bcfaca42824412c | /src/year2021/day05b.py | 7312f094f2e1e4a1e1a73ecd10c0b4f4b98def4c | [
"Unlicense"
] | permissive | lancelote/advent_of_code | 84559bf633189db3c3e4008b7777b1112f7ecd30 | 4b8ac6a97859b1320f77ba0ee91168b58db28cdb | refs/heads/master | 2023-02-03T14:13:07.674369 | 2023-01-24T20:06:43 | 2023-01-24T20:06:43 | 47,609,324 | 11 | 0 | null | 2019-10-07T07:06:42 | 2015-12-08T08:35:51 | Python | UTF-8 | Python | false | false | 305 | py | """2021 - Day 5 Part 2: Hydrothermal Venture."""
from src.year2021.day05a import Floor
from src.year2021.day05a import Segment
def solve(task: str) -> int:
    """Count floor points covered by two or more hydrothermal vent lines."""
    floor = Floor()
    floor.draw([Segment.from_line(line) for line in task.splitlines()])
    return floor.num_overlap
| [
"[email protected]"
] | |
0c4ced2b9b0cda7893c263ca688f35f779c8fbfb | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_257/ch16_2020_09_23_12_39_52_743421.py | 18424641beeb01c2388537b04c4757f22a441245 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | conta= float(input("Valor da conta com 10%: R$ "))
# Add the 10% service charge to the bill value read above.
conta += conta*10/100
# Display the final total formatted to two decimal places.
print("Valor da conta com 10%: R$ {0:.2f}".format(conta))
"[email protected]"
] | |
1c44748dba44714166cfa7f35d87338249edc098 | 088e000eb5f16e6d0d56c19833b37de4e67d1097 | /inference-engine/ie_bridges/python/sample/benchmark_app/benchmark/utils/inputs_filling.py | 00a294524716055d8a481d0892e5cc307a9458b6 | [
"Apache-2.0"
] | permissive | projectceladon/dldt | 614ba719a428cbb46d64ab8d1e845ac25e85a53e | ba6e22b1b5ee4cbefcc30e8d9493cddb0bb3dfdf | refs/heads/2019 | 2022-11-24T10:22:34.693033 | 2019-08-09T16:02:42 | 2019-08-09T16:02:42 | 204,383,002 | 1 | 1 | Apache-2.0 | 2022-11-22T04:06:09 | 2019-08-26T02:48:52 | C++ | UTF-8 | Python | false | false | 8,029 | py | """
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import cv2
import numpy as np
import sys
from glob import glob
from random import choice
from .logging import logger
IMAGE_EXTENSIONS = ['JPEG', 'JPG', 'PNG', 'BMP']
BINARY_EXTENSIONS = ['BIN']
def isImage(blob):
    """Heuristic: an input blob is an image when it is NCHW with 3 channels."""
    if blob.layout != "NCHW":
        return False
    return blob.shape[1] == 3
def isImageInfo(blob):
    """Heuristic: an NC blob with at least 2 channels is an image-info input."""
    if blob.layout != "NC":
        return False
    return blob.shape[1] >= 2
def getInputs(path_to_input, batch_size, input_info, requests):
    """Prepare one {input_name: blob} dict per infer request.

    Image-like inputs are filled from image files found under `path_to_input`,
    binary inputs from .bin files, image-info inputs from the image dimensions,
    and anything left over with random data.
    """
    # Remember H/W of every image-like input; log every network input as we go.
    input_image_sizes = {}
    for key in input_info.keys():
        if isImage(input_info[key]):
            input_image_sizes[key] = (input_info[key].shape[2], input_info[key].shape[3])
        logger.info("Network input '{}' precision {}, dimensions ({}): {}".format(
            key, input_info[key].precision, input_info[key].layout,
            " ".join(str(x) for x in input_info[key].shape)))

    images_count = len(input_image_sizes.keys())
    binaries_count = len(input_info) - images_count

    image_files = list()
    binary_files = list()
    if path_to_input:
        image_files = get_files_by_extensions(path_to_input, IMAGE_EXTENSIONS)
        image_files.sort()
        binary_files = get_files_by_extensions(path_to_input, BINARY_EXTENSIONS)
        binary_files.sort()

    if (len(image_files) == 0) and (len(binary_files) == 0):
        logger.warn("No input files were given: all inputs will be filled with random values!")
    else:
        # Warn when the number of provided files does not match what is required.
        binary_to_be_used = binaries_count * batch_size * len(requests)
        if binary_to_be_used > 0 and len(binary_files) == 0:
            logger.warn("No supported binary inputs found! Please check your file extensions: {}".format(",".join(BINARY_EXTENSIONS)))
        elif binary_to_be_used > len(binary_files):
            logger.warn("Some binary input files will be duplicated: {} files are required, but only {} were provided".format(binary_to_be_used, len(binary_files)))
        elif binary_to_be_used < len(binary_files):
            logger.warn("Some binary input files will be ignored: only {} files are required from {}".format(binary_to_be_used, len(binary_files)))
        images_to_be_used = images_count * batch_size * len(requests)
        if images_to_be_used > 0 and len(image_files) == 0:
            logger.warn("No supported image inputs found! Please check your file extensions: {}".format(",".join(IMAGE_EXTENSIONS)))
        elif images_to_be_used > len(image_files):
            logger.warn("Some image input files will be duplicated: {} files are required, but only {} were provided".format(images_to_be_used, len(image_files)))
        elif images_to_be_used < len(image_files):
            logger.warn("Some image input files will be ignored: only {} files are required from {}".format(images_to_be_used, len(image_files)))

    requests_input_data = []
    for request_id in range(0, len(requests)):
        logger.info("Infer Request {} filling".format(request_id))
        input_data = {}
        keys = list(input_info.keys())
        for key in keys:
            if isImage(input_info[key]):
                # input is image
                if len(image_files) > 0:
                    input_data[key] = fill_blob_with_image(image_files, request_id, batch_size, keys.index(key), len(keys), input_info[key].shape)
                    continue
            # input is binary
            if len(binary_files) > 0:
                # Bug fix: the original passed only (binary_files, shape), which
                # raised TypeError because fill_blob_with_binary takes the same
                # six rotation arguments as fill_blob_with_image.
                input_data[key] = fill_blob_with_binary(binary_files, request_id, batch_size, keys.index(key), len(keys), input_info[key].shape)
                continue
            # most likely input is image info
            if isImageInfo(input_info[key]) and len(input_image_sizes) == 1:
                image_size = input_image_sizes[list(input_image_sizes.keys()).pop()]
                logger.info("Fill input '" + key + "' with image size " + str(image_size[0]) + "x" +
                            str(image_size[1]))
                input_data[key] = fill_blob_with_image_info(image_size, input_info[key].shape)
                continue
            # fill with random data
            logger.info("Fill input '{}' with random values ({} is expected)".format(key, "image" if isImage(input_info[key]) else "some binary data"))
            input_data[key] = fill_blob_with_random(input_info[key].precision, input_info[key].shape)
        requests_input_data.append(input_data)
    return requests_input_data
def get_files_by_extensions(path_to_input, extensions):
    """Collect files whose (upper-cased) extension is in `extensions`.

    A direct file path is returned as-is without an extension check, matching
    the historical behaviour; a directory is scanned one level deep.
    """
    if os.path.isfile(path_to_input):
        return [path_to_input]
    matched = []
    for candidate in glob(os.path.join(path_to_input, '*'), recursive=True):
        extension = candidate.rsplit('.').pop().upper()
        if extension in extensions:
            matched.append(candidate)
    return matched
def fill_blob_with_image(image_paths, request_id, batch_size, input_id, input_size, shape):
    """Fill an NCHW image blob, rotating through `image_paths`.

    The file index advances by `input_size` (the number of network inputs) per
    batch entry so each request/input combination reads a distinct image.
    """
    images = np.ndarray(shape)
    # Starting file offset for this request/input combination.
    image_index = request_id*batch_size*input_size + input_id
    for b in range(batch_size):
        image_index %= len(image_paths)  # wrap around: files are reused when too few
        image_filename = image_paths[image_index]
        image = cv2.imread(image_filename)
        new_im_size = tuple(shape[2:])
        if image.shape[:-1] != new_im_size:
            logger.warn("Image {} is resized from ({}) to ({})".format(image_filename, image.shape[:-1], new_im_size))
            image = cv2.resize(image, new_im_size)
        # NOTE(review): (2, 1, 0) maps HWC -> C,W,H (it swaps height and width
        # as well as moving channels first) -- confirm C,H,W via (2, 0, 1) was
        # not the intent.
        image = image.transpose((2, 1, 0))
        images[b] = image
        image_index += input_size
    return images
def fill_blob_with_binary(binary_paths, request_id, batch_size, input_id, input_size, shape):
    """Fill one input blob from raw .bin files, rotating through `binary_paths`.

    Mirrors fill_blob_with_image: the file index advances by `input_size`
    (the number of network inputs) per batch entry so each request/input
    combination reads a distinct file.
    Raises Exception when a file's size does not match the per-entry blob size.
    """
    binaries = np.ndarray(shape)
    # Bytes expected per batch entry (product of all non-batch dimensions).
    blob_size = int(np.prod(shape)) // batch_size
    binary_index = request_id * batch_size * input_size + input_id
    for b in range(batch_size):
        binary_index %= len(binary_paths)  # bug fix: was len(image_paths) -> NameError
        binary_filename = binary_paths[binary_index]
        binary_file_size = os.path.getsize(binary_filename)  # bug fix: 'binary_file' was undefined
        if blob_size != binary_file_size:
            # Bug fix: original used C++-style '<<' between strings (TypeError);
            # it also shadowed the `input_size` rotation parameter.
            raise Exception("File " + binary_filename + " contains " + str(binary_file_size) +
                            " bytes but network expects " + str(blob_size))
        # Bug fix: read as raw bytes ('rb', not 'r') so text decoding cannot
        # mangle the payload; decode one byte per blob element.
        with open(binary_filename, 'rb') as f:
            binaries[b] = np.frombuffer(f.read(), dtype=np.uint8).reshape(shape[1:])
        binary_index += input_size
    return binaries
def fill_blob_with_image_info(image_size, shape):
    """Fill an NC image-info blob: each row is [H, W, 1, 1, ...]."""
    # Channels 0 and 1 carry the image dimensions; any extras default to 1.
    row = [image_size[channel] if channel in (0, 1) else 1
           for channel in range(shape[1])]
    im_info = np.ndarray(shape)
    im_info[:] = row  # broadcast the same row to every batch entry
    return im_info
def fill_blob_with_random(precision, shape):
    """Fill a blob of the given shape with random values cast to `precision`."""
    precision_to_dtype = {
        "FP32": np.float32,
        "FP16": np.float16,
        "I32": np.int32,
        "U8": np.uint8,
        "I8": np.int8,
        "U16": np.uint16,
        "I16": np.int16,
    }
    dtype = precision_to_dtype.get(precision)
    if dtype is None:
        raise Exception("Input precision is not supported: " + precision)
    return np.random.rand(*shape).astype(dtype)
| [
"[email protected]"
] | |
b11132f9a28ade952b2c9bb6c536a6194a591483 | c4e729edfb9b056b9aa111a31eebefe41f39ac46 | /cloudweb/db/message/message_object.py | 5f4f03c9a16d5b91d0beb86ef7b8ac7b1a9f8a7d | [] | no_license | sun3shines/web | 870a558538278ecb4a39e5d9cab4ba2ebb626ca3 | 9e6f83e6e793f86ecdf7202daae3903cc052f266 | refs/heads/master | 2021-01-18T23:49:48.722245 | 2016-07-02T10:25:24 | 2016-07-02T10:25:24 | 54,888,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,712 | py | # -*- coding: utf-8 -*-
from urllib import unquote
from cloudweb.db.db_record import record_put
# msgPut -> db_message_object_put
# msgGet -> db_message_object_get
# msgHead -> db_message_object_head
# msgMeta -> db_message_object_meta
# msgDelete -> db_message_object_delete
# msgDeleteRecycle -> db_message_object_deleterecycle
# msgMove -> db_message_object_move
# msgCopy -> db_message_object_copy
# msgMoveRecycle -> db_message_object_moverecycle
# msgPost -> db_message_object_post
def db_message_object_put(db, objPath):
    """Record an audit message for a PUT (upload) of ``objPath``."""
    urName = objPath.split('/')[0]    # first path component: owning user
    objName = objPath.split('/')[-1]  # last path component: object name
    return record_put(db, ' %s PUT OBJECT %s' % (urName, objName), urName, objPath)
def db_message_object_get(db, objPath):
    """Record an audit message for a download (GET) of ``objPath``."""
    urName = objPath.split('/')[0]
    objName = objPath.split('/')[-1]
    return record_put(db, ' %s DOWNLOAD OBJECT %s' % (urName, objName), urName, objPath)
def db_message_object_head(db, objPath):
    """Record an audit message for a HEAD (object info) request on ``objPath``."""
    urName = objPath.split('/')[0]
    objName = objPath.split('/')[-1]
    return record_put(db, ' %s GET OBJECT %s INFO' % (urName, objName), urName, objPath)
def db_message_object_meta(db, objPath):
    """Record an audit message for a metadata read on ``objPath``."""
    urName = objPath.split('/')[0]
    objName = objPath.split('/')[-1]
    return record_put(db, ' %s GET OBJECT %s METADATA' % (urName, objName), urName, objPath)
def db_message_object_delete(db, objPath):
    """Record an audit message for a DELETE of ``objPath``."""
    urName = objPath.split('/')[0]
    objName = objPath.split('/')[-1]
    return record_put(db, ' %s DELETE OBJECT %s' % (urName, objName), urName, objPath)
def db_message_object_deleterecycle(db, objPath):
    """Record an audit message for moving ``objPath`` into the recycle bin."""
    urName = objPath.split('/')[0]
    objName = objPath.split('/')[-1]
    return record_put(db, ' %s MOVE OBJECT %s TO RECYCLE' % (urName, objName), urName, objPath)
def db_message_object_move(db, objPath, dstName):
    """Record an audit message for moving ``objPath`` to ``dstName``."""
    urName = objPath.split('/')[0]
    objName = objPath.split('/')[-1]
    return record_put(db, ' %s MOVE OBJECT %s TO %s' % (urName, objName, dstName), urName, objPath)
def db_message_object_copy(db, objPath, dstName):
    """Record an audit message for copying ``objPath`` to ``dstName``."""
    urName = objPath.split('/')[0]
    objName = objPath.split('/')[-1]
    return record_put(db, ' %s COPY OBJECT %s TO %s' % (urName, objName, dstName), urName, objPath)
def db_message_object_moverecycle(db, objPath):
    """Record an audit message for restoring ``objPath`` from the recycle bin."""
    urName = objPath.split('/')[0]
    objName = objPath.split('/')[-1]
    return record_put(db, ' %s MOVE OBJECT %s FROM RECYCLE' % (urName, objName), urName, objPath)
def db_message_object_post(db, objPath, header):
    """Record an audit message for a metadata update (POST) on ``objPath``.

    Bug fix: the original format string had two ``%s`` placeholders but was
    given three arguments (urName, objName, header), so every call raised
    ``TypeError: not all arguments converted``. The message now names the
    object and includes the updated header.
    """
    urName = objPath.split('/')[0]    # first path component: owning user
    objName = objPath.split('/')[-1]  # last path component: object name
    msg = ' %s UPDATE OBJECT %s METADATA %s' % (urName, objName, header)
    return record_put(db, msg, urName, objPath)
| [
"[email protected]"
] | |
f78c3b2044a52976c4a838d5b89dbdf2832b3022 | 3b8a4101995b1ba889dc685901a62db72ab13184 | /examples/tweets/config.py | 9956e34e66ccae84cc8211d70d5b54357f47a9c3 | [
"BSD-3-Clause"
] | permissive | tchen0123/pulsar | 586aeb69419c0eac034431405979edd91b4347b2 | 53a311e51974a27f6ef081c38193b41dede1412f | refs/heads/master | 2021-01-18T19:12:04.143947 | 2015-07-21T06:29:04 | 2015-07-21T06:29:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | twitter_api_key = 'twitter API key of your registered twitter application'
twitter_api_secret = 'consumer secret'
twitter_access_token = 'Access token'
twitter_access_secret = 'Access token secret'
twitter_stream_filter = {'track': 'python'}
| [
"[email protected]"
] | |
b03ac5488518a3f330bc0113472150497457e28f | 44d2f40d4229f1cb26cec013cb18751d8006a219 | /snippets_backend/settings/development.py | ffcb584fad67c3d5c1faf1d5e6527be5cf605b3f | [] | no_license | prettyirrelevant/snippets-backend | 4bcb1d4c2cfa9bcd099856f026320c1250f08dc3 | 0006c194870904620599ca52b8b1510b11c1e2e9 | refs/heads/master | 2023-03-21T19:36:22.230824 | 2021-03-22T09:57:14 | 2021-03-22T09:57:14 | 331,959,739 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,559 | py | """
Django settings for snippets_backend project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): committing a literal key is tolerable only because this module
# is the development settings; production must load SECRET_KEY from the
# environment and rotate this value.
SECRET_KEY = '@w*wfy5)lp19)4-zf&0y^je9wc8=)ljqjcwoj82xsujxfi&o-1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # development-only: exposes stack traces and settings
ALLOWED_HOSTS = []  # empty => only localhost is served while DEBUG is True
# Application definition
AUTH_USER_MODEL = 'api.User'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'knox',
'api'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'snippets_backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'snippets_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': ['knox.auth.TokenAuthentication'],
'DEFAULT_PERMISSION_CLASSES': ['rest_framework.permissions.AllowAny']
}
REST_KNOX = {
'USER_SERIALIZER': 'api.serializers.UserSerializer',
'EXPIRY_DATETIME_FORMAT': None
}
CORS_ALLOWED_ORIGINS = ["http://localhost:3000"]
| [
"[email protected]"
] | |
07ececcce929817e0e7dd845a6f6bbe686954a00 | 701ff727e23005eebc4410b30752f32e64ead30e | /config/settings.py | 78b947ed91b70730b28c4c615c31a10434da97e3 | [] | no_license | sinjorjob/django-chat | 79ae5a94464301b2913be18ef5c81d2c870817b2 | d35d7fdb3888cdefa1a4daead05f10454a20ef4f | refs/heads/master | 2023-06-25T09:18:51.551222 | 2021-07-30T19:13:02 | 2021-07-30T19:13:02 | 391,166,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,930 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the 'django-insecure-' prefix marks an auto-generated dev key;
# load SECRET_KEY from the environment before deploying.
SECRET_KEY = 'django-insecure-^dal#)b$5x8j2)2(osq^d^i-tt*=7pux8$i$(-pjd%bi+ia9@n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # development-only: exposes stack traces and settings
ALLOWED_HOSTS = []  # empty => only localhost is served while DEBUG is True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'chat.apps.ChatConfig', #追加
'accounts.apps.AccountsConfig', #追加
'widget_tweaks', #追加
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
import os
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# カスタムユーザモデル
AUTH_USER_MODEL = 'accounts.CustomUser'
#ログイン後のリダイレクトURL設定
LOGIN_REDIRECT_URL = '/chat_room/'
#ログアウト後のリダイレクト先
LOGOUT_REDIRECT_URL = '/'
# ファイルアップロード用
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = '/media/'
| [
"[email protected]"
] | |
515f571aec0c41aa280a7ad4f155a691de756151 | e7efae2b83216d9621bd93390959d652de779c3d | /datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/__init__.py | 8153698063f3e5affe147fad62925b3a12cfa3e0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | DataDog/integrations-core | ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | refs/heads/master | 2023-08-31T04:08:06.243593 | 2023-08-30T18:22:10 | 2023-08-30T18:22:10 | 47,203,045 | 852 | 1,548 | BSD-3-Clause | 2023-09-14T16:39:54 | 2015-12-01T16:41:45 | Python | UTF-8 | Python | false | false | 617 | py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from ..console import CONTEXT_SETTINGS
from .changelog import changelog
from .integrations import integrations
from .integrations_changelog import integrations_changelog
from .requirements import requirements
# Single registry of agent subcommands so the group below stays in sync.
ALL_COMMANDS = (changelog, requirements, integrations, integrations_changelog)
@click.group(context_settings=CONTEXT_SETTINGS, short_help='A collection of tasks related to the Datadog Agent')
def agent():
    # Empty body on purpose: `agent` only acts as a click command group;
    # the registered subcommands do the actual work.
    pass
# Attach every subcommand to the `agent` group at import time.
for command in ALL_COMMANDS:
    agent.add_command(command)
| [
"[email protected]"
] | |
70926b02978529fa9edc66e2ea1a2862ddad1222 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_074/ch153_2020_04_13_20_39_02_324902.py | 7c41c4092f5c5dbd3653ad6e6e0f0b55a83b2984 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | def agrupa_por_idade(dicio):
dicio={[nome]:idade}
key=nome
dic={}
if idade<=11:
dic.update={criança:[nome]}
return dic
if idade>=12 and idade>=17:
dic.update={adolescente:[nome]}
return dic
if idade>=18 and idade<=59:
dic.update={adulto:[nome]}
return dic
else:
dic.update={idoso:[nome]}
return dic | [
"[email protected]"
] | |
83fec668e56fcdff66e94ad5af3d22793aba1ac8 | 2e67bdd45c0427490880ca02f913a923a0890cdf | /foodcartapp/migrations/0043_order_products.py | 759a18dabb4d27be55133c80f21cd2960ebab509 | [] | no_license | KozhevnikovM/devman-star-burger | 5ed72c2a8a99bee12770bd2d28aa35c92be0cff8 | 54836d0216ea1117ea12ddfff11afbef15e7a3b5 | refs/heads/master | 2023-04-12T23:23:28.862134 | 2021-04-19T13:17:15 | 2021-04-19T13:17:15 | 355,147,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # Generated by Django 3.0.7 on 2021-03-23 12:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foodcartapp', '0042_auto_20210317_1251'),
]
operations = [
migrations.AddField(
model_name='order',
name='products',
field=models.ManyToManyField(through='foodcartapp.OrderPosition', to='foodcartapp.Product'),
),
]
| [
"[email protected]"
] | |
849bc3bb90ec4d300eed4c9ce126e2b3ed2aeef5 | b483c598fa375e9af02348960f210b9f482bd655 | /pythonbrasil/exercicios/listas/LT resp 06.py | 4a956fe6704cf8f89b2b9ac2bdcf1bef84176545 | [
"MIT"
] | permissive | brunofonsousa/python | 6f766d08bf193180ea9a4903cb93ffd167db588d | 8f2f26c77015c0baaa76174e004406b4115272c7 | refs/heads/master | 2022-09-30T14:58:01.080749 | 2020-06-08T09:55:35 | 2020-06-08T09:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | '''
Faça um Programa que peça as quatro notas de 10 alunos, calcule e
armazene num vetor a média de cada aluno, imprima o número de alunos
com média maior ou igual a 7.0.
'''
# Per the spec in the docstring above: read the four grades of each of 10
# students, store each student's average in a list, then report how many
# averages are >= 7.0.
# Bug fixes vs. the original: it hard-coded 2 students / 2 grades, never reset
# the grade accumulator between students, re-created the averages list on
# every student (keeping only the last one), and used > instead of >=.
ALUNOS = 10
PROVAS = 4
medias = []
for aluno in range(1, ALUNOS + 1):
    soma_notas = 0.0  # reset per student
    for prova in range(1, PROVAS + 1):
        soma_notas += float(input("Digite a %iª nota do aluno %i: " % (prova, aluno)))
    medias.append(soma_notas / PROVAS)
aprovados = 0
for media in medias:
    if media >= 7:
        aprovados += 1
print("O número de alunos com média maior ou igual a 7.00 foi de %i." % aprovados)
| [
"[email protected]"
] | |
683033b34e5ba82571bedabf75dda4cfedc1e88c | bb62f4738e32b82904b61d4be9d21b41d05ed694 | /motion_planners/rrt_connect.py | bb1702b156f42c9066f8eda37cc052634eb5eeba | [
"MIT"
] | permissive | yhome22/motion-planners | 34049b1f65cb8f45d656ce61d94e4a605d861615 | 891423418a9c6ac5d6fbe2bbc9c51087ae7d9b03 | refs/heads/master | 2023-06-11T10:38:10.807421 | 2021-06-15T23:54:51 | 2021-06-15T23:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,461 | py | import time
from .primitives import extend_towards
from .rrt import TreeNode, configs
from .utils import irange, RRT_ITERATIONS, INF, elapsed_time
def wrap_collision_fn(collision_fn):
    """Adapt a collision checker so it can always be called as fn(q1, q2).

    Supports both two-argument checkers (edge checks) and single-argument
    checkers; for the latter the source configuration q1 is ignored.
    """
    # TODO: joint limits
    def adapter(q1, q2):
        try:
            return collision_fn(q1, q2)
        except TypeError:
            # Single-configuration signature: only test the destination.
            return collision_fn(q2)
    return adapter
def rrt_connect(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,
                max_iterations=RRT_ITERATIONS, max_time=INF, **kwargs):
    """
    :param start: Start configuration - conf
    :param goal: End configuration - conf
    :param distance_fn: Distance function - distance_fn(q1, q2)->float
    :param sample_fn: Sample function - sample_fn()->conf
    :param extend_fn: Extension function - extend_fn(q1, q2)->[q', ..., q"]
    :param collision_fn: Collision function - collision_fn(q)->bool
    :param max_iterations: Maximum number of iterations - int
    :param max_time: Maximum runtime - float
    :param kwargs: Keyword arguments
    :return: Path [q', ..., q"] or None if unable to find a solution
    """
    # TODO: goal sampling function connected to a None node
    start_time = time.time()
    # Either endpoint in collision: no path can exist.
    if collision_fn(start) or collision_fn(goal):
        return None
    # TODO: support continuous collision_fn with two arguments
    #collision_fn = wrap_collision_fn(collision_fn)
    nodes1, nodes2 = [TreeNode(start)], [TreeNode(goal)] # TODO: allow a tree to be prespecified (possibly as start)
    for iteration in irange(max_iterations):
        if elapsed_time(start_time) >= max_time:
            break
        # Grow the smaller tree each iteration to keep the two trees balanced.
        swap = len(nodes1) > len(nodes2)
        tree1, tree2 = nodes1, nodes2
        if swap:
            tree1, tree2 = nodes2, nodes1
        target = sample_fn()
        # Extend the smaller tree towards a random sample...
        last1, _ = extend_towards(tree1, target, distance_fn, extend_fn, collision_fn,
                                  swap, **kwargs)
        # ...then try to connect the other tree to the node just added.
        last2, success = extend_towards(tree2, last1.config, distance_fn, extend_fn, collision_fn,
                                        not swap, **kwargs)
        if success:
            # Trees met: stitch the two root-to-meeting-point paths together.
            path1, path2 = last1.retrace(), last2.retrace()
            if swap:
                path1, path2 = path2, path1
            #print('{} max_iterations, {} nodes'.format(iteration, len(nodes1) + len(nodes2)))
            # path1[:-1] drops the duplicated meeting configuration; path2 is
            # reversed so the result runs from start towards goal.
            path = configs(path1[:-1] + path2[::-1])
            # TODO: return the trees
            return path
    return None
#################################################################
def birrt(start, goal, distance_fn, sample_fn, extend_fn, collision_fn, **kwargs):
    """Bidirectional RRT: a thin wrapper running rrt_connect with restarts.

    :param start: Start configuration - conf
    :param goal: End configuration - conf
    :param distance_fn: Distance function - distance_fn(q1, q2)->float
    :param sample_fn: Sample function - sample_fn()->conf
    :param extend_fn: Extension function - extend_fn(q1, q2)->[q', ..., q"]
    :param collision_fn: Collision function - collision_fn(q)->bool
    :param kwargs: Keyword arguments forwarded to random_restarts
    :return: Path [q', ..., q"] or None if unable to find a solution
    """
    # TODO: deprecate
    from .meta import random_restarts
    solutions = random_restarts(rrt_connect, start, goal, distance_fn, sample_fn,
                                extend_fn, collision_fn, max_solutions=1, **kwargs)
    return solutions[0] if solutions else None
| [
"[email protected]"
] | |
938d74f683b6899da1a3a4e45a9ca95feeccf13d | 5b777b268b804bc984f87d714ef25677ab10fab1 | /causallib/estimation/marginal_outcome.py | 6c83f15371c3f78daa5f11e480dbc6c2d0148bae | [
"Apache-2.0"
] | permissive | vishalbelsare/causallib | 71c06cafbf9d3f2163c4921d64cab8d36413ca67 | 9f0ddb4696d580cf0a529a6c6ce98b40b34e3796 | refs/heads/master | 2023-07-10T09:57:57.293064 | 2022-12-19T15:19:28 | 2022-12-19T15:19:28 | 230,206,247 | 0 | 0 | Apache-2.0 | 2022-12-22T00:45:47 | 2019-12-26T06:14:10 | Python | UTF-8 | Python | false | false | 3,669 | py | """
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Apr 25, 2018
"""
import pandas as pd
from .base_weight import WeightEstimator
from .base_estimator import PopulationOutcomeEstimator
class MarginalOutcomeEstimator(WeightEstimator, PopulationOutcomeEstimator):
    """
    A marginal outcome predictor.
    Assumes the sample is marginally exchangeable, and therefore does not correct (adjust, control) for covariates.
    Predicts the outcome/effect as if the sample came from a randomized control trial: $\\Pr[Y|A]$.
    """

    def compute_weight_matrix(self, X, a, use_stabilized=None, **kwargs):
        # Another way to view this is that Uncorrected is basically an IPW-like with all individuals equally weighted.
        # Bug fix: `a.unique()` returns an ndarray and `ndarray.sort()` sorts
        # in place and returns None, so `treatment_values` became None and the
        # DataFrame was built without proper columns. Use sorted() instead.
        treatment_values = sorted(a.unique())
        weights = pd.DataFrame(data=1, index=a.index, columns=treatment_values)
        return weights

    def compute_weights(self, X, a, treatment_values=None, use_stabilized=None, **kwargs):
        # Another way to view this is that Uncorrected is basically an IPW-like with all individuals equally weighted.
        weights = pd.Series(data=1, index=a.index)
        return weights

    def fit(self, X=None, a=None, y=None):
        """
        Dummy implementation to match the API.
        MarginalOutcomeEstimator acts as a WeightEstimator that weights each sample as 1

        Args:
            X (pd.DataFrame): Covariate matrix of size (num_subjects, num_features).
            a (pd.Series): Treatment assignment of size (num_subjects,).
            y (pd.Series): Observed outcome of size (num_subjects,).

        Returns:
            MarginalOutcomeEstimator: a fitted model.
        """
        return self

    def estimate_population_outcome(self, X, a, y, w=None, treatment_values=None):
        """
        Calculates potential population outcome for each treatment value.

        Args:
            X (pd.DataFrame): Covariate matrix of size (num_subjects, num_features).
            a (pd.Series): Treatment assignment of size (num_subjects,).
            y (pd.Series): Observed outcome of size (num_subjects,).
            w (pd.Series | None): Individual (sample) weights calculated. Used to achieved unbiased average outcome.
                                  If not provided, will be calculated on the data.
            treatment_values (Any): Desired treatment value/s to stratify upon before aggregating individual into
                                    population outcome.
                                    If not supplied, calculates for all available treatment values.

        Returns:
            pd.Series[Any, float]: Series which index are treatment values, and the values are numbers - the
                                   aggregated outcome for the strata of people whose assigned treatment is the key.
        """
        if w is None:
            w = self.compute_weights(X, a)
        res = self._compute_stratified_weighted_aggregate(y, sample_weight=w, stratify_by=a,
                                                          treatment_values=treatment_values)
        return res
| [
"[email protected]"
] | |
d627be799d34ca09b15dbb8a8ba4999497693d40 | babc3e26d66a8084c9f84a0431338bafabae6ffd | /TaeJuneJoung/COD/lv2.OddOccurrencesInArray.py | d4d409749fec81fd81fda721db9af44dc3514b7c | [] | no_license | hoteldelluna/AlgoStudy | 5c23a1bfb07dbfbabc5bedd541d61784d58d3edc | 49ec098cecf2b775727d5648161f773e5488089b | refs/heads/dev | 2022-10-09T14:29:00.580834 | 2020-01-25T14:40:55 | 2020-01-25T14:40:55 | 201,632,052 | 5 | 0 | null | 2020-01-25T14:40:57 | 2019-08-10T13:11:41 | Python | UTF-8 | Python | false | false | 1,356 | py | """
무조건 하나만 홀수가 발생하니
마지막 index는 짝수일 수밖에 없다.(0부터 시작이니)
[조건]
1. A의 크기가 1인 경우
2. 홀수가 중간에 있는 경우
3. 홀수가 맨 마지막에 있는 경우
"""
def solution(A):
A.sort()
for i in range(0, len(A)-1, 2):
if A[i] != A[i+1]:
# 조건2 - 홀수가 1개밖에 없으니 답이 아니라면 짝수개이므로 앞에 것이 틀리다.
return A[i]
# 조건1, 3 - 조건2에서 끝나지 않았다면 맨 마지막 값이 답
return A[-1]
"""
[처음 풀이]
시도를 해본 문제
문제를 이해를 잘못한 부분도 한몫하였고,
효율성을 가장 크게 생각해야했던 문제
처음에는 set으로 감싸서 중복을 없앤 후,
해당 set내용으로 A.count를 하였으나 N^2이 나와 실패
Dict형태도 퍼포먼스에서는 좋지 않았다.
어떻게 짜면 효율적일지 다른 방도로 생각해보면 좋을듯한 문제.
현재 방법은 100점이나, 더 좋은 방도가 없을까?
"""
def solution(A):
A.sort()
if len(A) < 2:
return A[0]
cnt = 1
for i in range(1, len(A)):
if A[i-1] == A[i]:
cnt += 1
else:
if cnt%2:
return A[i-1]
else:
cnt = 1
return A[i] | [
"[email protected]"
] | |
92ea82d00e3baa47f0708f8943155310bef045d0 | eda9187adfd53c03f55207ad05d09d2d118baa4f | /python3_base/exception.py | 78bffed238c1ab8437126e7d6c33d8e406d2aae6 | [] | no_license | HuiZhaozh/python_tutorials | 168761c9d21ad127a604512d7c6c6b38b4faa3c7 | bde4245741081656875bcba2e4e4fcb6b711a3d9 | refs/heads/master | 2023-07-07T20:36:20.137647 | 2020-04-24T07:18:25 | 2020-04-24T07:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # -*- coding:utf-8 -*-
# /usr/bin/python
'''
Author:Yan Errol
Email:[email protected]
Wechat:qq260187357
Date:2019-04-29--21:59
Describe:异常诊断
'''
import time
def func():
try:
for i in range(5):
if i >3:
raise Exception("数字大于3了==")
except Exception as ret:
print (ret)
func()
import re
a = "张明 99分"
ret = re.sub(r"\d+","100",a)
print (ret)
a = [1,2,3]
b = [4,5,6]
print(a+b) | [
"[email protected]"
] | |
46a9ea2d394fede56dd4689d643f5f6492dbb5d8 | 9e05aa78126e76040e4afdd83c1eba95a9c787f5 | /generator/list2.py | 9ddb23a03b2684eb7ade8a8f5033cda8d41be041 | [
"MIT"
] | permissive | lreis2415/geovalidator | 8df4cb4671288b1242d0035cf1cde1944676e1df | dd64b0577aa458b39022afa503e890e966eb56d8 | refs/heads/master | 2022-12-10T18:32:41.293337 | 2021-03-10T01:04:20 | 2021-03-10T01:04:20 | 233,007,264 | 0 | 0 | MIT | 2022-12-08T08:04:28 | 2020-01-10T09:00:31 | Python | UTF-8 | Python | false | false | 1,131 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: houzhiwei
# time: 2020/1/4 16:10
from rdflib import BNode, Graph, RDF, Namespace, Literal
from rdflib.namespace import DCTERMS
g = Graph()
# namespaces
data = Namespace("http://www.egc.org/ont/data#")
saga = Namespace("http://www.egc.org/ont/process/saga#")
sh = Namespace("http://www.w3.org/ns/shacl#")
process = Namespace('http://www.egc.org/ont/gis/process#')
# prefixes
g.bind('data', data)
g.bind('sh', sh)
g.bind('saga', saga)
g.bind('process', process)
g.bind('dcterms', DCTERMS)
# SHACL shape graph
ds = saga.FlowAccumulationTopDownShape
g.add((ds, RDF.type, sh.NodeShape))
# [tool]_[parameter]
g.add((ds, sh.targetNode, saga.method_of_flow_accumulation_top_down))
p1 = BNode()
g.add((p1, sh.path, process.hasData))
g.add((p1, sh.minCount, Literal(0)))
g.add((p1, sh.maxCount, Literal(1)))
g.add((p1, sh.message, Literal('Must has at most one input value for option ‘Method’ of tool ‘Flow Accumulation (Top-Down)’', lang='en')))
g.add((ds, sh.property, p1))
# save as turtle file
g.serialize('../shapes/L2_FunctionalityLevelShape.ttl', format='turtle')
| [
"[email protected]"
] | |
7562eab065b565fc40986e5b85bde0cffe2bf27d | dfcb65de02953afaac24cc926ee32fcdede1ac21 | /src/pyrin/caching/local/handlers/__init__.py | 4f7d9594e8de35a0a70f0749192b9e0b9fa7c5d4 | [
"BSD-3-Clause"
] | permissive | mononobi/pyrin | 031d0c38da945b76b07ea100554ffc7f8081b05e | 9d4776498225de4f3d16a4600b5b19212abe8562 | refs/heads/master | 2023-08-31T03:56:44.700142 | 2023-08-20T22:20:06 | 2023-08-20T22:20:06 | 185,481,041 | 20 | 8 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # -*- coding: utf-8 -*-
"""
caching local handlers package.
"""
from pyrin.packaging.base import Package
class CachingLocalHandlersPackage(Package):
"""
caching local handlers package class.
"""
NAME = __name__
DEPENDS = ['pyrin.configuration',
'pyrin.globalization.datetime',
'pyrin.logging']
| [
"[email protected]"
] | |
8f8808d79b13456226c20d29fa09308ae24382df | cdf23a2b22b0d0643f9bf48fd8c7d0a8ef83945d | /qstrader/utils/console.py | beee1fa020b1139e7988a543dd9ea3de95049652 | [
"MIT"
] | permissive | PabloHebe/qstrader | 2e23d267e0e2cf6632011eaea486891c8eed4c17 | 81c9473fbb782220c5cced2e331fb7a7b0b0082d | refs/heads/master | 2022-08-27T10:28:27.411188 | 2019-12-16T14:17:40 | 2019-12-16T14:17:40 | 111,547,620 | 0 | 1 | MIT | 2020-01-05T12:54:16 | 2017-11-21T12:42:55 | Python | UTF-8 | Python | false | false | 258 | py | BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def string_colour(text, colour=WHITE):
"""
Create string text in a particular colour to the terminal.
"""
seq = "\x1b[1;%dm" % (30 + colour) + text + "\x1b[0m"
return seq
| [
"[email protected]"
] | |
c7d99b683e6acbbe80cbc85721394ac0f1c7323f | f999bc5a6e0da4f0904ef2112d7b6191f180ca5b | /Advent of code/Day2_Part1.py | 44f5dafb0aa805206e823978d61b1740a82b147f | [] | no_license | ritesh-deshmukh/Algorithms-and-Data-Structures | 721485fbe91a5bdb4d7f99042077e3f813d177cf | 2d3a9842824305b1c64b727abd7c354d221b7cda | refs/heads/master | 2022-11-09T00:18:51.203415 | 2018-10-08T22:31:05 | 2018-10-08T22:31:05 | 132,504,988 | 0 | 1 | null | 2022-10-23T00:51:15 | 2018-05-07T19:07:33 | Python | UTF-8 | Python | false | false | 1,297 | py | # f = open("elves_input", "r")
# if f.mode == "r":
# input_task = f.read()
# input_task = f.readlines()
# for symbol in input_task:
# dimensions = symbol.split("x")
# print(dimensions)
with open('elves_input') as f:
dimensions_data = []
for line in f:
line = line.split('x') # to deal with blank
if line: # lines (ie skip them)
line = [int(i) for i in line]
dimensions_data.append(line)
# product = dimensions_data[0][0]
# print(dimensions_data[0])
total_area = 0
for dimensions in dimensions_data:
# sorted = sorted(dimensions)
# small_side_1 = sorted[0]
# small_side_2 = sorted[1]
area = ((2* dimensions[0] * dimensions[1])
+ (2* dimensions[1] * dimensions[2])
+ (2* dimensions[0] * dimensions[2]))
total_area += area
# print(sorted)
print(f"Area total: {total_area}")
total_small_side = 0
for dimensions1 in dimensions_data:
area1 = sorted(dimensions1)
# print(area1[0] * area1[1])
small_side = area1[0] * area1[1]
total_small_side += small_side
print(f"Small side total: {total_small_side}")
answer = total_area + total_small_side
print(f"Total Square feet: {answer}") | [
"[email protected]"
] | |
1e68f4426a5b3835594ad8792a036f353f9b5734 | 32eba552c1a8bccb3a329d3d152b6b042161be3c | /15_pj_pdf_merger.py | d316f0b6a7a805701c4abd4debff148e5b564734 | [] | no_license | ilmoi/ATBS | d3f501dbf4b1099b76c42bead3ec48de3a935a86 | 7f6993751e2ad18af36de04168d32b049d85a9c1 | refs/heads/master | 2022-07-11T21:56:23.284871 | 2020-05-15T05:26:06 | 2020-05-15T05:26:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | """Finds all pdfs in cur dir > sorts alphabetically > merges together taking the first page only once."""
import PyPDF2
import os
import re
# prep the files list
files = os.listdir()
chosen = []
r = re.compile(r'.*\.pdf')
for file in files:
try:
mo = r.search(file)
# print(mo.group())
chosen.append(mo.group())
except:
pass
chosen.sort()
# manually removing the encrypted file (cba)
chosen.pop(1)
chosen.pop(1)
print(chosen)
# create writer
writer = PyPDF2.PdfFileWriter()
# iterate through files and pages and write them all down
for i, file in enumerate(chosen):
with open(file, 'rb') as f:
reader = PyPDF2.PdfFileReader(f)
# for first doc - add the first page too
if i == 0:
pageObj = reader.getPage(0)
writer.addPage(pageObj)
# for all docs
for p in range(1, reader.numPages):
pageObj = reader.getPage(p)
writer.addPage(pageObj)
# finally write
# NOTE this one needs to sit inside of the with open statement or the pages will be blank!
with open('longfile.pdf', 'wb') as f:
writer.write(f)
# lets check number of pages matches
for file in chosen:
with open(file, 'rb') as f:
reader = PyPDF2.PdfFileReader(f)
print(reader.numPages)
print('compare that to ----->')
with open('longfile.pdf', 'rb') as f:
reader = PyPDF2.PdfFileReader(f)
print(reader.numPages)
# sounds correct!
| [
"[email protected]"
] | |
9dedd846ed49f891c3ea2109f26b3eed81fcdf88 | 320bf3ddd6233577d9f2f08f046eaef96f881e4e | /simplemooc/core/urls.py | eb0570de064c9f271570646c26f555b2bce99b28 | [
"MIT"
] | permissive | leorzz/simplemooc | 057ba3e220c20907017edfd8d0fc0422f9a6d99c | 8b1c5e939d534b1fd729596df4c59fc69708b896 | refs/heads/master | 2022-10-22T02:24:46.733062 | 2017-12-17T16:37:04 | 2017-12-17T16:37:04 | 112,488,280 | 0 | 1 | MIT | 2022-10-08T17:50:17 | 2017-11-29T14:52:23 | Python | UTF-8 | Python | false | false | 523 | py | from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
import simplemooc.core.views
urlpatterns = [
url(r'^$', simplemooc.core.views.home, name='home'),
url(r'^contact/$',simplemooc.core.views.contact, name='contact'),
url(r'^about/$',simplemooc.core.views.about, name='about'),
]
#urlpatterns = patterns('simplemooc.core.views',
# url(r'^$','home', name='home'),
# url(r'^contact/$','contact', name='contact'),
# url(r'^about/$','about', name='about'),
#)
| [
"[email protected]"
] | |
c2cfda99592ea8ed25c13139448162753c8e3e09 | 6ff7b3cd99aea670792aad35f49b4d762bd3952a | /migrations/versions/f8f3d3338933_initial.py | e1fddda3a2e45b3ac97c7d14513b75afa99b9458 | [] | no_license | melardev/FlaskApiCrud | 3af8c1f375f6aefe258334368fdc7bcab900a2a0 | 40e0ffe6f690a1698a3c3f6dd1a03398260cd073 | refs/heads/master | 2020-04-27T23:06:51.527985 | 2019-03-10T00:52:26 | 2019-03-10T00:52:26 | 174,762,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | """'initial'
Revision ID: f8f3d3338933
Revises:
Create Date: 2019-03-08 21:02:46.699000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f8f3d3338933'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('todos',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=100), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('completed', sa.Boolean(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('todos')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
dfbba26851a42e9ca1b1a62230992475e7e16da9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/12/usersdata/76/5514/submittedfiles/impedimento.py | e64198eb3222c4320efd7c71f70c7c45cd091526 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#COMECE SEU CÓDIGO AQUI
L = input('Digite o valor de L:')
R = input('Digite o valor de R:')
D = input('Digite o valor de D:')
if R>50 and L<R and R>D:
print('S')
if R>50 and L<R and R<D:
print('N') | [
"[email protected]"
] | |
aad8c4965b91dbc1a68802b5dc45aa593d98d20a | d65cb684d344ab072d0f9801afbd074768a059a2 | /Suanfa/天际线问题Suanfa1_3.py | e826e906c21a8c27b4b7e96acc49c55fb8d6548d | [] | no_license | QiuHongHao123/Algorithm-Practise | a918debd002182010b78e284df038c01d9921619 | e7a7b7537edbbb8fa35c2dddf2b122cf863e479d | refs/heads/master | 2023-03-14T09:16:28.407137 | 2021-03-01T11:57:54 | 2021-03-01T11:57:54 | 272,642,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | def getSkyline(buildings):
if not buildings: return []
if len(buildings) == 1:
return [[buildings[0][0], buildings[0][2]], [buildings[0][1], 0]]
mid = len(buildings) // 2
left = getSkyline(buildings[:mid])
right = getSkyline(buildings[mid:])
return merge(left, right)
def merge(left, right):
# 记录目前左右建筑物的高度
lheight = rheight = 0
# 位置
l = r = 0
res = []
while l < len(left) and r < len(right):
if left[l][0] < right[r][0]:
cp = [left[l][0], max(left[l][1], rheight)]
lheight = left[l][1]
l += 1
elif left[l][0] > right[r][0]:
cp = [right[r][0], max(right[r][1], lheight)]
rheight = right[r][1]
r += 1
else:
cp = [left[l][0], max(left[l][1], right[r][1])]
lheight = left[l][1]
rheight = right[r][1]
l += 1
r += 1
# 和前面高度比较,不一样才加入
if len(res) == 0 or res[-1][1] != cp[1]:
res.append(cp)
# 剩余部分添加进去
res.extend(left[l:] or right[r:])
return res
print(getSkyline([[1,5,11], [2,7,6], [3,9,13], [12,16,7], [14,25,3], [19,22,18], [23,29,13],[24,28,4]])) | [
"[email protected]"
] | |
869d7f8aec582f9c09dfa15e9791d99d7c9c617d | 170a4c0b1accb9468567f6a88254ff738f2a8166 | /EQ5D.py | 3fab62e39350976be780783eaeae004522dfd006 | [] | no_license | yazhisun/Labs_PreferenceScores | 2ecd9acdb21403f912200db1fa41f0f6e325ef18 | 3eb0ec0e55f1772b15a3108dd0a85dbcf75e1743 | refs/heads/master | 2021-05-09T22:56:44.996009 | 2018-01-18T16:03:31 | 2018-01-18T16:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py |
# EQ-5D regression coefficients
Constant = 0.081
N3 = 0.269
dictCoefficients = {'Mobility': [0, 0.069, 0.314],
'Self-Care': [0, 0.104, 0.214],
'Usual Activity': [0, 0.036, 0.094],
'Pain/Discomfort': [0, 0.123, 0.386],
'Anxiety/Depression': [0, 0.071, 0.236]};
| [
"[email protected]"
] | |
49681f30a6612dac501c48d0b1e070e630b6bf72 | fd9257a4cc04b89c2b8c92008770a82ccdfe85bd | /build/spdlog/catkin_generated/generate_cached_setup.py | 3db5318c1429f193fb60f8495755cfd61895d77f | [] | no_license | Zauberr/KAL | 40b135f02e9ae9c7bf55b064094aaff88c43628e | 225e538058b632c8c14cc638e12fcb124bd81e29 | refs/heads/master | 2020-08-16T18:26:19.863213 | 2019-10-16T13:38:46 | 2019-10-16T13:38:46 | 215,537,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/mrtros/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/mrtros/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/kal5-2/rammbo/devel;/opt/mrtros;/opt/mrtsoftware/local;/opt/mrtsoftware/release".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/kal5-2/rammbo/devel/.private/spdlog/env.sh')
output_filename = '/home/kal5-2/rammbo/build/spdlog/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
4c69aba309858501551b000e6236b893e0d8f7f7 | 30b2eb381ec8f3225671274e77a55b63206dfb60 | /leetcode/p0461/solve.py | d9975e98a22b92ab40c7733e7fe0660fbb2ee3ca | [] | no_license | b1ueskydragon/PythonGround | 52888f32336e5e20be8490454beb67e676be7057 | 5a401250e88926235f581e6c004d1a4acb44230d | refs/heads/master | 2021-07-10T03:00:38.340959 | 2021-04-02T03:34:29 | 2021-04-02T03:34:29 | 98,208,402 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | class Solution:
def hammingDistance(self, x: int, y: int) -> int:
xor = x ^ y
ones = 0
while xor:
if (xor | 1) == xor:
ones += 1
xor >>= 1
return ones
if __name__ == '__main__':
s = Solution()
"""
011
101
---
110
count = 2
"""
print(s.hammingDistance(3, 5))
| [
"[email protected]"
] | |
5493b2f3a565402852a6d878c4d63e0d4b1c5509 | 3263139017e2e3cc253e93a9fb92604b00176466 | /pias/pias_logging.py | 761213610fb3cf88f47af4c7ab242ecf47990d20 | [] | no_license | saalfeldlab/pias | 245fb589b30e197fc03c152231ecc138d6ac7ae3 | acc7c19dc0ca81b846816ec0d0edf7ff87d46665 | refs/heads/master | 2020-04-22T06:38:58.126298 | 2019-03-10T19:01:53 | 2019-03-10T19:01:56 | 170,197,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import logging
print(logging)
trace = logging.DEBUG- 5
logging.TRACE = trace
logging.addLevelName(trace, 'TRACE')
class PiasLogger(logging.getLoggerClass()):
def trace(self, msg, *args, **kwargs):
self.log(trace, msg, *args, **kwargs)
logging.setLoggerClass(PiasLogger)
levels = ('NOTSET', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL', 'FATAL', 'TRACE') | [
"[email protected]"
] | |
ba81708752f0fb17ace59645543fa3e7548bc1cb | 6bfcb3b91c2489cab0d9788079f69f37cf7e1387 | /test/test-bbox.py | fdd971e119df9736e87277292955aa7e59241bc5 | [
"BSD-3-Clause"
] | permissive | glamod/cdm-lens | 02f77f4270594acfadcf5b628bcdd8ea9a574b46 | d257906a3cd9fd01c118777803ef6b880b15ba81 | refs/heads/master | 2023-01-28T17:44:25.861444 | 2023-01-13T08:55:13 | 2023-01-13T08:55:13 | 212,615,087 | 1 | 0 | NOASSERTION | 2022-12-08T06:50:15 | 2019-10-03T15:34:44 | Python | UTF-8 | Python | false | false | 2,423 | py | import requests
import pandas as pd
import io
import math
TMPL = 'http://glamod2.ceda.ac.uk/select/?domain=land&frequency=monthly&variable=accumulated_precipitation,air_temperature&intended_use=non-commercial&data_quality=quality_controlled&column_selection=detailed_metadata&year=1974&month=03&bbox={w}.0,{s}.0,{e}.0,{n}.0&compress=false'
def _assert_in_range(df, w, s, e, n, to_nearest_degree=False):
if len(df) == 0:
print('Empty df')
return
lats, lons = df.latitude, df.longitude
min_lat, max_lat = lats.min(), lats.max()
min_lon, max_lon = lons.min(), lons.max()
print(f'Wanted lons: {w} to {e}; lats: {s} to {n}')
print(f'Actual lons: {min_lon} to {max_lon}; lats: {min_lat} to {max_lat}')
def fix(n):
if n < 0:
return math.ceil(n)
else:
return math.floor(n)
if to_nearest_degree:
min_lat, max_lat, min_lon, max_lon = [fix(_) for _ in [min_lat, max_lat, min_lon, max_lon]]
# print(lats, lats.max(), lats.min())
assert(min_lat >= s), 'min_lat >= s'
assert(max_lat <= n), 'max_lat <= n'
if min_lat == max_lat and min_lat == -90 or min_lat == 90:
print('Longitude checks are meaningless at the north/south pole')
return
if 90 in list(lats) or -90 in list(lats):
print('Some lats are north/south pole - so ignore longitude checks')
assert(min_lon >= w), 'min_lon >= w'
assert(max_lon <= e), 'max_lon <= e'
def _fetch_as_df(w, s, e, n):
url = TMPL.format(**vars())
print(f'{url}')
content = requests.get(url).text
if content.startswith('Exception raised'):
print(f'[ERROR] Fetch error: {content}')
return content
return pd.read_csv(io.StringIO(content))
def test_bbox_in_range():
for w in range(-180, 160, 30):
e = w + 30
for s in range(-90, 61, 30):
n = s + 30
df = _fetch_as_df(w, s, e, n)
_assert_in_range(df, w, s, e, n, True)
def test_bbox_full_range():
bboxes = ['-180,-90,180,90'] #, '-90,90,-180,180', '-90,-180,90,180']
for bbox in bboxes:
w, s, e, n = [int(_) for _ in bbox.split(',')]
df = _fetch_as_df(w, s, e, n)
if type(df) == str:
continue
_assert_in_range(df, w, s, e, n, True)
if __name__ == '__main__':
test_bbox_full_range()
test_bbox_in_range()
| [
"[email protected]"
] | |
1e4c3dc8648edeb0f51d861b4003419811ebc27a | 28b6e6a35b6591f36140b6cb907ac60c71dbcab1 | /app/migrations/0001_initial.py | b9dba1404818a767036d64bf7989c45046f5bdcf | [] | no_license | mahmud-sajib/Social-Profile-Rest-Api | 97c89af42439d08e730b3901fc76ac21cc3a7280 | 5c84ad847ce3303d63284a4363a2b1b4aaf76319 | refs/heads/master | 2023-03-22T09:24:00.439550 | 2021-03-15T08:18:44 | 2021-03-15T08:18:44 | 347,887,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | # Generated by Django 3.1 on 2021-02-28 06:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Status',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(blank=True, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='uploads/%Y/%m/%d')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
0a6291eaae1de1fc9b8321ad436642d3776c3ae5 | d48dfa622e07d346a91be3aa8e8657e409faf552 | /RozbudowaKodu/lab_files/lab_file_2.py | 6b25fe1a7a19217f18835bf54768e39c3fa1b477 | [] | no_license | sineczek/PythonSrednioZaawansowany | 71c8c94f7cdc193482a50b94315b86e1f0ab0039 | 75823b36de99ef9ac487672cf131a0b84ce23d2b | refs/heads/main | 2023-03-14T04:33:26.853500 | 2021-03-06T18:13:02 | 2021-03-06T18:13:02 | 332,524,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import math
argument_list = []
results_list = []
for i in range (1000000):
argument_list.append(i/10)
for x in argument_list:
results_list.append(abs(x**3 - x**0.5))
print('min = {} max = {}'.format(min(results_list), max(results_list))) | [
"[email protected]"
] | |
c668614ba1c31b9ddada5697bd9bd9833931bd3e | d28a65d23c204a9736b597ae510d9dd54d2ffd0f | /bin/newdb | cbffe8f0ede31ac97c8ea7393d309dee7b9fa505 | [
"BSD-3-Clause"
] | permissive | cts2/rf2db | 99ba327611e620fc5533245064afcc1daff7c164 | 985cd7ad84c8907306a0d7d309d4a1c0fb422ba4 | refs/heads/master | 2020-05-17T22:37:25.476553 | 2015-08-24T22:18:19 | 2015-08-24T22:18:19 | 15,264,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,053 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import argparse
import os
# Assuming that we are running in the bin directory
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
sys.path.append(os.path.join(_curdir, '..'))
# TODO: Make this into a package
sys.path.append(os.path.join(_curdir, '..', '..', 'ConfigManager'))
from rf2db.db.RF2DBConnection import config_parms, debug_parms, cp_values, db_values, RF2DBConnection
helpargs = ['-h', '-?']
def main(argv):
""" Create a MySQL database for RF2 files and/or manage the connection parameters. Example sequence:
* python newdb --upd ../../rf2service/settings.conf --host localhost --db rf220140731 --charset utf8 --user root --passwd pw
* python newdb --show ../../rf2service/settings.conf
* python newdb --create ../../rf2service/settings.conf
"""
parser = argparse.ArgumentParser(description="Set up RF2 DB parameters and optionally create a database")
parser.add_argument('configfile', help="configuration file location")
parser.add_argument('--show', dest='show', action="store_true", help="show current configuration")
parser.add_argument('--upd', dest='update', action="store_true", help="update configuration file")
parser.add_argument('--create', action="store_true", help="create database if it doesn't exist")
# Can't do a lot more if there isn't configuration file
if len(argv) == 0 or (len(argv) == 1 and argv[0] in helpargs):
config_parms.add_to_parser(parser)
debug_parms.add_to_parser(parser)
parser.parse_args(argv)
return
# There is (or should be) a configuration file -- pick it out of the arguments and then reparse
args = [e for e in argv if e not in helpargs]
fileopt, _ = parser.parse_known_args(args)
# Open the existing configuration file so we know what the defaults should be
cp_values.set_configfile(fileopt.configfile)
config_parms.add_to_parser(parser, cp_values)
debug_parms.add_to_parser(parser, db_values)
opts = parser.parse_args(argv)
cp_values.update(vars(opts))
if opts.show:
print(str(cp_values))
if opts.update or not opts.show:
if cp_values.flush():
print("\nConfiguration file updated")
if opts.create:
RF2DBConnection().newDB()
print("Database %s created in %s" % (cp_values.db, cp_values.host + ((':' + cp_values.port) if cp_values.port else '')))
if __name__ == '__main__':
main(sys.argv[1:])
| [
"[email protected]"
] | ||
852065b653ca396ea321c7ff5ad1faeaba1cebe6 | 88b4b883c1a262b5f9ca2c97bf1835d6d73d9f0b | /src/api/python/hce/ftests/ftest_exit_code_simple.py | b2c4a8d9a82b0002258fc983f2ffd5611aca4435 | [] | no_license | hce-project/hce-bundle | 2f93dc219d717b9983c4bb534884e4a4b95e9b7b | 856a6df2acccd67d7af640ed09f05b2c99895f2e | refs/heads/master | 2021-09-07T22:55:20.964266 | 2018-03-02T12:00:42 | 2018-03-02T12:00:42 | 104,993,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | #!/usr/bin/python
"""
HCE project, Python bindings, Distributed Tasks Manager application.
RTCFinalizer Class content main functional for finalize realtime crawling.
@package: dc
@file rtc-finalizer.py
@author Oleksii <[email protected]>, bgv, Alexander Vybornyh <[email protected]>
@link: http://hierarchical-cluster-engine.com/
@copyright: Copyright © 2013-2015 IOIX Ukraine
@license: http://hierarchical-cluster-engine.com/license/
@since: 0.1
"""
import ppath
from ppath import sys
import os
import sys
os._exit(11)
| [
"bgv@bgv-d9"
] | bgv@bgv-d9 |
8025ba35b9d424317c8728eb00872d51f226b847 | 5fe083b1082dd960dda5789b1cac7287be1d882b | /bin/parse_oneway.py | ade40fc8bd3d76417799a19345007a36ee098b97 | [
"MIT"
] | permissive | single-cell-rna-sequencing/scanorama | d412a98386354483a7ae768cb314731084c36431 | 60d21e5f71722baedc1cc0c2f0bff0338116b16a | refs/heads/master | 2020-05-18T19:03:02.178470 | 2018-12-11T23:14:55 | 2018-12-11T23:14:55 | 184,600,314 | 0 | 1 | null | 2019-05-02T14:55:33 | 2019-05-02T14:55:33 | null | UTF-8 | Python | false | false | 1,088 | py | import numpy as np
from scanorama import plt
plt.rcParams.update({'font.size': 25})
import sys
scano, uncor = {}, {}
in_scano = True
for line in open(sys.argv[1]):
fields = line.rstrip().split()
if len(fields) > 3:
continue
try:
F = float(fields[1])
except ValueError:
continue
if in_scano:
scano[fields[0]] = F
else:
uncor[fields[0]] = F
if fields[0] == 'ZZZ3':
in_scano = False
scanorama, uncorrected = [], []
for gene in set(scano.keys()) & set(uncor.keys()):
scanorama.append(scano[gene])
uncorrected.append(uncor[gene])
scanorama = np.array(scanorama)
uncorrected = np.array(uncorrected)
below = sum(scanorama > uncorrected + 50)
above = sum(scanorama < uncorrected - 50)
print('{}% above line'.format(float(above) / float(above + below) * 100))
name = sys.argv[1].split('.')[0]
line = min(max(scanorama), max(uncorrected))
plt.figure()
plt.scatter(scanorama, uncorrected, s=10)
plt.plot([0, line], [0, line], 'r--')
plt.tight_layout()
plt.savefig('oneway_{}.png'.format(name))
| [
"[email protected]"
] | |
32f4c462ec8097a34c1519e066a80a65f1a14c8f | 4f3a4c194451eae32f1ff7cf3b0db947e3892365 | /contest24/matrix.py | 6a654f89bbe393517b379bdacf7311c9a7f2387e | [] | no_license | szhongren/leetcode | 84dd848edbfd728b344927f4f3c376b89b6a81f4 | 8cda0518440488992d7e2c70cb8555ec7b34083f | refs/heads/master | 2021-12-01T01:34:54.639508 | 2021-11-30T05:54:45 | 2021-11-30T05:54:45 | 83,624,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | """
Given a matrix consists of 0 and 1, find the distance of the nearest 0 for each cell.
The distance between two adjacent cells is 1.
Example 1:
Input:
0 0 0
0 1 0
0 0 0
Output:
0 0 0
0 1 0
0 0 0
Example 2:
Input:
0 0 0
0 1 0
1 1 1
Output:
0 0 0
0 1 0
1 2 1
Note:
The number of elements of the given matrix will not exceed 10,000.
There are at least one 0 in the given matrix.
The cells are adjacent in only four directions: up, down, left and right.
"""
class Solution(object):
def updateMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
indexes = [(i, j) for i in range(len(matrix)) for j in range(len(matrix[0])) if matrix[i][j] == 1]
matrix = [[0 if val == 0 else -1 for val in row]for row in matrix]
curr_level = 0
while len(indexes) > 0:
new_indexes = []
for index in indexes:
done = False
x = index[0]
y = index[1]
if x > 0:
if matrix[x - 1][y] == curr_level:
done = True
matrix[x][y] = curr_level + 1
if y > 0:
if matrix[x][y - 1] == curr_level:
done = True
matrix[x][y] = curr_level + 1
if x < len(matrix) - 1:
if matrix[x + 1][y] == curr_level:
done = True
matrix[x][y] = curr_level + 1
if y < len(matrix[0]) - 1:
if matrix[x][y + 1] == curr_level:
done = True
matrix[x][y] = curr_level + 1
if not done:
new_indexes.append(index)
curr_level += 1
indexes = new_indexes
return matrix
ans = Solution()
print(ans.updateMatrix([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
]))
print(ans.updateMatrix([
[1, 1, 1],
[0, 1, 0],
[0, 0, 0]
]))
| [
"[email protected]"
] | |
dbee469da3d768ac8bd9b40a106f32df70d98ae3 | 069dafce9f495f09bf8c2f76dbf5c045b7551721 | /run_size_V1_inhibition_overlapping.py | 2445079f234bb3bce526b0f73ebe9143a77a5600 | [] | no_license | dguarino/T2 | 26b1bc640812aa5438b09f9fab2bc73096cd7eef | 66b786928508089492f5f696c7c1576e098c6615 | refs/heads/master | 2020-04-03T22:39:06.059845 | 2020-03-13T15:43:02 | 2020-03-13T15:43:02 | 41,812,819 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | # -*- coding: utf-8 -*-
"""
This is
"""
from pyNN import nest
import sys
import mozaik
import mozaik.controller
from mozaik.controller import run_workflow, setup_logging
from mozaik.storage.datastore import Hdf5DataStore, PickledDataStore
from parameters import ParameterSet
from model_V1_full import ThalamoCorticalModel
from experiments import create_experiments_size_V1_inactivated_overlapping
from analysis_and_visualization import perform_analysis_test
from analysis_and_visualization import perform_analysis_and_visualization
from analysis_and_visualization import perform_analysis_and_visualization_radius
try:
from mpi4py import MPI
except ImportError:
MPI = None
if MPI:
mpi_comm = MPI.COMM_WORLD
MPI_ROOT = 0
logger = mozaik.getMozaikLogger()
# Manage what is executed
# a set of variable here to manage the type of experiment and whether the pgn, cortex are there or not.
withPGN = True #
withV1 = True # open-loop
withFeedback_CxPGN = True # closed loop
withFeedback_CxLGN = True # closed loop
# Model execution
if True:
data_store,model = run_workflow('ThalamoCorticalModel', ThalamoCorticalModel, create_experiments_size_V1_inactivated_overlapping )
data_store.save()
# or only load pickled data
else:
setup_logging()
# data_store = PickledDataStore(load=True,parameters=ParameterSet({'root_directory':'Deliverable/ThalamoCorticalModel_data_size_overlapping_____', 'store_stimuli' : False}),replace=True)
data_store = PickledDataStore(load=True,parameters=ParameterSet({'root_directory':'ThalamoCorticalModel_data_size_overlapping_____', 'store_stimuli' : False}),replace=True)
logger.info('Loaded data store')
# Analysis and Plotting
if mpi_comm.rank == MPI_ROOT:
# perform_analysis_test( data_store )
# perform_analysis_and_visualization( data_store, 'luminance', withPGN, withV1 )
# perform_analysis_and_visualization( data_store, 'contrast', withPGN, withV1 )
# perform_analysis_and_visualization( data_store, 'spatial_frequency', withPGN, withV1 )
# perform_analysis_and_visualization( data_store, 'temporal_frequency', withPGN, withV1 )
perform_analysis_and_visualization( data_store, 'size', withPGN, withV1 )
# perform_analysis_and_visualization( data_store, 'size_radius', withPGN, withV1 )
# perform_analysis_and_visualization( data_store, 'orientation', withPGN, withV1 )
# import numpy
# step = .2
# for i in numpy.arange(step, 2.+step, step):
# perform_analysis_and_visualization_radius( data_store, 'size_radius', [i-step,i], withPGN, withV1 )
data_store.save()
| [
"[email protected]"
] | |
d74fff88ba05004f13b29253044811a8d2b7d787 | 3249577773cf18e5c09ea36de62477ddb43b662b | /Python/flask_fundamentals/Disappearing Ninja/server.py | 91bc33c8de93b14123c980e0d252bf8f7f89d6c4 | [] | no_license | HollinRoberts/code | 5394abe2a7c42bbbe83d8f64a99c50a52f05792b | 8026522ab169c4174037fdf1b271de60b75d79bf | refs/heads/master | 2021-01-01T16:12:11.674680 | 2017-10-18T21:08:10 | 2017-10-18T21:08:10 | 97,786,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/ninja")
def ninja():
return render_template("ninja.html")
@app.route("/ninja/<color>")
def ninja_color(color):
    """Render the turtle whose mask matches ``color``; anything else gets April."""
    templates = {
        "blue": "leonardo.html",
        "orange": "michelangelo.html",
        "red": "raphael.html",
        "purple": "donatello.html",
    }
    return render_template(templates.get(color, "notapril.html"))
app.run(debug=True) | [
"[email protected]"
] | |
d59c7349c687bb89df6ffe6c91d0cb52724efdaa | d4eb113c44c86322b3811513a7286d176f106eb6 | /experiments/variational_autoencoder/validation/compare_results.py | 9533452ba1c680b701a373947b1b8279453615c6 | [] | no_license | philip-brohan/Machine-Learning | 67a2eb780383b3436da4fef1d763f39d255ae696 | dc53b9c336d5f12272257f327abe49dec436ea04 | refs/heads/master | 2021-03-27T12:33:07.518279 | 2020-04-30T19:38:02 | 2020-04-30T19:38:02 | 56,614,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,113 | py | #!/usr/bin/env python
# Model training results plot
import tensorflow as tf
tf.enable_eager_execution()
import numpy
import IRData.twcr as twcr
import iris
import datetime
import argparse
import os
import math
import pickle
import Meteorographica as mg
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import cartopy
import cartopy.crs as ccrs
# Function to resize and rotate pole
def rr_cube(cbe):
# Use the Cassini projection (boundary is the equator)
cs=iris.coord_systems.RotatedGeogCS(0.0,60.0,270.0)
# Latitudes cover -90 to 90 with 79 values
lat_values=numpy.arange(-90,91,180/78)
latitude = iris.coords.DimCoord(lat_values,
standard_name='latitude',
units='degrees_north',
coord_system=cs)
# Longitudes cover -180 to 180 with 159 values
lon_values=numpy.arange(-180,181,360/158)
longitude = iris.coords.DimCoord(lon_values,
standard_name='longitude',
units='degrees_east',
coord_system=cs)
dummy_data = numpy.zeros((len(lat_values), len(lon_values)))
dummy_cube = iris.cube.Cube(dummy_data,
dim_coords_and_dims=[(latitude, 0),
(longitude, 1)])
n_cube=cbe.regrid(dummy_cube,iris.analysis.Linear())
return(n_cube)
# Get the 20CR data
ic=twcr.load('prmsl',datetime.datetime(2009,3,12,18),
version='2c')
ic=rr_cube(ic.extract(iris.Constraint(member=1)))
# Get the autoencoder
model_save_file=("%s/Machine-Learning-experiments/"+
"variational_autoencoder/"+
"/saved_models/Epoch_%04d/autoencoder") % (
os.getenv('SCRATCH'),500)
autoencoder=tf.keras.models.load_model(model_save_file,compile=False)
# Normalisation - Pa to mean=0, sd=1 - and back
def normalise(x):
    """Scale pressure in Pa to approximately zero-mean, unit-sd units.

    NOTE: the augmented assignments also mutate ``x`` in place when it is
    a mutable array type (e.g. a numpy array), not just the return value —
    callers below rely on the returned value only, but the argument is
    modified too.
    """
    x -= 101325
    x /= 3000
    return x
def unnormalise(x):
    """Invert normalise(); mutates a mutable array argument in place."""
    x *= 3000
    x += 101325
    return x
fig=Figure(figsize=(9.6,10.8), # 1/2 HD
dpi=100,
facecolor=(0.88,0.88,0.88,1),
edgecolor=None,
linewidth=0.0,
frameon=False,
subplotpars=None,
tight_layout=None)
canvas=FigureCanvas(fig)
# Top - map showing original and reconstructed fields
projection=ccrs.RotatedPole(pole_longitude=60.0,
pole_latitude=0.0,
central_rotated_longitude=270.0)
ax_map=fig.add_axes([0.01,0.51,0.98,0.48],projection=projection)
ax_map.set_axis_off()
extent=[-180,180,-90,90]
ax_map.set_extent(extent, crs=projection)
matplotlib.rc('image',aspect='auto')
# Run the data through the autoencoder and convert back to iris cube
pm=ic.copy()
pm.data=normalise(pm.data)
ict=tf.convert_to_tensor(pm.data, numpy.float32)
ict=tf.reshape(ict,[1,79,159,1])
result=autoencoder.predict_on_batch(ict)
result=tf.reshape(result,[79,159])
pm.data=unnormalise(result)
# Background, grid and land
ax_map.background_patch.set_facecolor((0.88,0.88,0.88,1))
#mg.background.add_grid(ax_map)
land_img_orig=ax_map.background_img(name='GreyT', resolution='low')
# original pressures as red contours
mg.pressure.plot(ax_map,ic,
scale=0.01,
resolution=0.25,
levels=numpy.arange(870,1050,7),
colors='red',
label=False,
linewidths=1)
# Encoded pressures as blue contours
mg.pressure.plot(ax_map,pm,
scale=0.01,
resolution=0.25,
levels=numpy.arange(870,1050,7),
colors='blue',
label=False,
linewidths=1)
mg.utils.plot_label(ax_map,
'%04d-%02d-%02d:%02d' % (2009,3,12,6),
facecolor=(0.88,0.88,0.88,0.9),
fontsize=8,
x_fraction=0.98,
y_fraction=0.03,
verticalalignment='bottom',
horizontalalignment='right')
# Scatterplot of encoded v original
ax=fig.add_axes([0.08,0.05,0.45,0.4])
aspect=.225/.4*16/9
# Axes ranges from data
dmin=min(ic.data.min(),pm.data.min())
dmax=max(ic.data.max(),pm.data.max())
dmean=(dmin+dmax)/2
dmax=dmean+(dmax-dmean)*1.05
dmin=dmean-(dmean-dmin)*1.05
if aspect<1:
ax.set_xlim(dmin/100,dmax/100)
ax.set_ylim((dmean-(dmean-dmin)*aspect)/100,
(dmean+(dmax-dmean)*aspect)/100)
else:
ax.set_ylim(dmin/100,dmax/100)
ax.set_xlim((dmean-(dmean-dmin)*aspect)/100,
(dmean+(dmax-dmean)*aspect)/100)
ax.scatter(x=pm.data.flatten()/100,
y=ic.data.flatten()/100,
c='black',
alpha=0.25,
marker='.',
s=2)
ax.set(ylabel='Original',
xlabel='Encoded')
ax.grid(color='black',
alpha=0.2,
linestyle='-',
linewidth=0.5)
# Plot the training history
history_save_file=("%s/Machine-Learning-experiments/"+
"variational_autoencoder/"+
"saved_models/history_to_%04d.pkl") % (
os.getenv('SCRATCH'),500)
history=pickle.load( open( history_save_file, "rb" ) )
ax=fig.add_axes([0.62,0.05,0.35,0.4])
# Axes ranges from data
ax.set_xlim(0,len(history['loss']))
ax.set_ylim(0,numpy.max(numpy.concatenate((history['loss'],
history['val_loss']))))
ax.set(xlabel='Epochs',
ylabel='Loss (grey) and validation loss (black)')
ax.grid(color='black',
alpha=0.2,
linestyle='-',
linewidth=0.5)
ax.plot(range(len(history['loss'])),
history['loss'],
color='grey',
linestyle='-',
linewidth=2)
ax.plot(range(len(history['val_loss'])),
history['val_loss'],
color='black',
linestyle='-',
linewidth=2)
# Render the figure as a png
fig.savefig("comparison_results.png")
| [
"[email protected]"
] | |
a2010a39af08d72a34b058f92fd12104c0aa8d29 | aa0cc19eedf38baca2ecef3de6f2a4c69ce68675 | /clld/scripts/postgres2sqlite.py | 168f94320038122afe286f29dcc8c331998e4f23 | [] | no_license | mitcho/clld | de84c54247138efa53ee5f68a87edc2a0ab06bbf | dcf5f063a44ac5167f677f05b2c66b0d094d4ff3 | refs/heads/master | 2021-01-18T09:56:18.486647 | 2013-08-23T15:13:18 | 2013-08-23T15:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,229 | py | """
python postgres2sqlite.py apics 2>&1 >/dev/null | less
Unfortunately this approach does not seem to work, thus, our only option is
intialize_db and making sure all db changes are done via alembic migrations.
"""
from subprocess import call
from importlib import import_module
import pkg_resources
import re
from tempfile import mktemp
from path import path
from sqlalchemy import create_engine
from clld.db.meta import Base
def replace_booleans(line):
    """Rewrite postgres boolean literals in a pg_dump INSERT statement
    as sqlite-friendly 0/1 and return the line with a trailing newline.

    .. note::

        - relies on the INSERT statement not containing newlines.
        - the value list is split naively on ", ", so a text value that
          equals "true" or "false" would be mangled; a robust version
          would use a real SQL tokenizer such as sqlparse.

    >>> assert replace_booleans('INSERT (true, false);').strip() == 'INSERT (1, 0);'
    """
    head, tail = line.split('(', 1)
    assert tail.endswith(');')
    body = tail[:-2]
    cleaned = ['1' if tok == 'true' else '0' if tok == 'false' else tok
               for tok in body.split(', ')]
    return '%s(%s);\n' % (head, ', '.join(cleaned))
# Matches the end of a complete INSERT statement: either a closing quote
# or a trailing numeric value immediately before ");".
STMT_END = re.compile("([^\']\'|\, [0-9]+)\)\;$")
def inserts(iterator):
    """Yield complete INSERT statements from a pg_dump line iterator.

    Statements that span several physical lines are stitched back
    together with the marker "__newline__"; non-INSERT lines outside a
    pending statement are dropped.

    >>> assert list(inserts(["INSERT (1, 1);"])) == ['INSERT (1, 1);']
    >>> assert list(inserts(["INSERT ('a", "b');"])) == ["INSERT ('a__newline__b');"]
    """
    pending = []
    for raw in iterator:
        stripped = raw.strip()
        if stripped.startswith('INSERT '):
            if STMT_END.search(stripped):
                # Single-line statement: emit it directly.
                yield stripped
            else:
                # Start of a multi-line statement.
                pending = [stripped]
        elif pending:
            # Continuation of a multi-line statement.
            pending.append(stripped)
            if STMT_END.search(stripped):
                statement = '__newline__'.join(pending)
                pending = []
                yield statement
def convert_dump(i, o): # pragma: no cover
_insert = False
with file(o, 'w') as fp:
fp.write('.echo OFF\n.bail ON\n')
fp.write('BEGIN;\n')
for n, insert in enumerate(inserts(file(i))):
fp.write(replace_booleans(insert))
fp.write('END;\n')
def postgres2sqlite(name): # pragma: no cover
pg_sql = path(mktemp('.sql'))
sqlite_sql = path(mktemp('.sql'))
sqlite = mktemp('.sqlite')
call("pg_dump -f {0} --data-only --inserts {1}".format(pg_sql, name), shell=True)
convert_dump(pg_sql, sqlite_sql)
engine = create_engine('sqlite:////{0}'.format(sqlite))
m = import_module('{0}.models'.format(name))
Base.metadata.create_all(engine)
call('sqlite3 -bail -init {0} {1} ".exit"'.format(sqlite_sql, sqlite), shell=True)
if pg_sql.exists():
pg_sql.remove()
if sqlite_sql.exists():
sqlite_sql.remove()
return sqlite
if __name__ == '__main__': # pragma: no cover
import sys
postgres2sqlite(sys.argv[1])
sys.exit(0)
| [
"[email protected]"
] | |
f60880e5d4192b5bcbd9bd669c188d6935c9d098 | 4bee31f6a823fb1aebbd3dfe1d163aa0b1d41a7c | /seata/registry/FileRegistry.py | 460f4982eb6b95f9f7bcc623f50e55a313c15d63 | [
"Apache-2.0"
] | permissive | rohankumardubey/seata-python | 92532d1e8f8c961f2317aa8c23e2f53fe07711e9 | 66fb3382217a43effa3d1bc5ec2b62204d499dba | refs/heads/master | 2023-08-17T08:29:12.603412 | 2021-09-27T06:04:56 | 2021-09-27T06:04:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
from seata.config.Config import ConfigFactory
from seata.core.rpc.Address import Address
from seata.registry.Registry import Registry
class FileRegistry(Registry):
    """Registry backed by the static ``service.grouplist`` entries in the
    local seata config file.  Registration and subscription are no-ops
    because the membership is fixed by configuration."""

    config = ConfigFactory.get_config()

    def __init__(self):
        pass

    def register(self, address):
        # Static registry: nothing to publish.
        pass

    def unregister(self, address):
        pass

    def subscribe(self, cluster, listener):
        # Static registry: membership never changes, nothing to watch.
        pass

    def unsubscribe(self, cluster, listener):
        pass

    def lookup(self, key):
        """Resolve ``key`` to a list of Address objects.

        Returns None when the service group is unknown; raises ValueError
        for a malformed "ip:port" endpoint entry.
        """
        cluster_name = super(FileRegistry, self).get_service_group(key)
        if cluster_name is None:
            return None
        raw = self.config.get('service.grouplist.' + cluster_name)
        addresses = []
        for endpoint in raw.split(';'):
            if not endpoint.strip():
                # Skip empty fragments from leading/trailing/double ';'.
                continue
            parts = endpoint.split(':')
            if len(parts) != 2:
                raise ValueError('endpoint format should like ip:port')
            addresses.append(Address(parts[0], int(parts[1])))
        return addresses

    def close(self):
        pass
| [
"[email protected]"
] | |
efaf5827b686a2a2c8b12a2e327f2178fa269f5c | 7954d761dde104a9d977006c514ff976a9c88444 | /backend/menu/migrations/0001_initial.py | a6a707da319ae2e8ae9d0ffbe9ae598eb1ac1002 | [] | no_license | crowdbotics-apps/firebase-25585 | 3c693fee6f6e75805fe5b8d40f24ee6b137e29e3 | 5473848fbdad0683030c8f3bd64d03fdc4a1382c | refs/heads/master | 2023-04-05T13:07:26.443879 | 2021-04-09T10:28:31 | 2021-04-09T10:28:31 | 356,229,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | # Generated by Django 2.2.19 on 2021-04-09 10:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('delivery_user_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.URLField()),
('icon', models.URLField()),
],
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('prefix', models.CharField(max_length=8)),
('flag', models.URLField()),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.URLField()),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='item_category', to='menu.Category')),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('review_text', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_item', to='menu.Item')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='review_profile', to='delivery_user_profile.Profile')),
],
),
migrations.CreateModel(
name='ItemVariant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('price', models.FloatField()),
('image', models.URLField()),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_country', to='menu.Country')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_item', to='menu.Item')),
],
),
]
| [
"[email protected]"
] | |
2973c8a04aa45789fe2dd63d8482dcf76c80e95b | 53440fe1e7370b564d3e1161a2a39bd99425f2f7 | /fairing/constants/constants.py | 703f260d7848b679e869cb80c980ec0ea0265a54 | [
"Apache-2.0"
] | permissive | karthikrajkumar/fairing | a89123c0c1385f691bb8d2b301926360c9e70ed3 | 4f9e007365101443e1230ee206980ed6014f7d31 | refs/heads/master | 2020-06-24T01:11:10.950976 | 2019-07-22T03:06:52 | 2019-07-22T03:06:52 | 198,804,843 | 0 | 0 | Apache-2.0 | 2019-07-25T09:51:13 | 2019-07-25T09:51:13 | null | UTF-8 | Python | false | false | 1,125 | py | TEMP_TAR_GZ_FILENAME = '/tmp/fairing.layer.tar.gz'
DEFAULT_IMAGE_NAME = 'fairing-job'
DEFAULT_BASE_IMAGE = 'gcr.io/kubeflow-images-public/fairing:dev'
DEFAULT_REGISTRY = 'index.docker.io'
DEFAULT_DEST_PREFIX = '/app/'
DEFAULT_CONTEXT_FILENAME = '/tmp/fairing.context.tar.gz'
DEFAULT_GENERATED_DOCKERFILE_FILENAME = '/tmp/Dockerfile'
GOOGLE_CREDS_ENV = 'GOOGLE_APPLICATION_CREDENTIALS'
GCP_CREDS_SECRET_NAME = 'user-gcp-sa'
AWS_CREDS_SECRET_NAME = 'aws-secret'
DEFAULT_USER_AGENT = 'kubeflow-fairing/{VERSION}'
# Job Constants
JOB_DEFAULT_NAME = 'fairing-job-'
JOB_DEPLOPYER_TYPE = 'job'
# Serving Constants
SERVING_DEPLOPYER_TYPE = 'serving'
#TFJob Constants
TF_JOB_GROUP = "kubeflow.org"
TF_JOB_KIND = "TFJob"
TF_JOB_PLURAL = "tfjobs"
TF_JOB_VERSION = "v1beta2"
TF_JOB_DEFAULT_NAME = 'fairing-tfjob-'
TF_JOB_DEPLOYER_TYPE = 'tfjob'
# KFServing constants
KFSERVING_GROUP = "serving.kubeflow.org"
KFSERVING_KIND = "KFService"
KFSERVING_PLURAL = "kfservices"
KFSERVING_VERSION = "v1alpha1"
KFSERVING_DEFAULT_NAME = 'fairing-kfserving-'
KFSERVING_DEPLOYER_TYPE = 'kfservice'
KFSERVING_CONTAINER_NAME = 'user-container'
| [
"[email protected]"
] | |
a0321890fdf0babae23c4b46e7dca8a0e7afbf90 | 60dff076fae5d36af71af1066ac7eb4f833d2f2f | /tools/ci_build/github/apple/c/assemble_c_pod_package.py | 18dc8a19d23ceffa99f30900c4c998c464d550e2 | [
"MIT"
] | permissive | NervanaSystems/onnxruntime | 79e60f9c6feb8c147868d27de8077a276755cc90 | 96b3c09e2a5e0a5b4f98ed9059a719d9c7b73724 | refs/heads/master | 2023-06-22T02:55:35.250834 | 2023-01-03T22:54:46 | 2023-01-03T22:54:46 | 162,268,647 | 1 | 3 | MIT | 2021-01-14T12:56:23 | 2018-12-18T10:09:13 | C++ | UTF-8 | Python | false | false | 2,687 | py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import pathlib
import shutil
import sys
_script_dir = pathlib.Path(__file__).parent.resolve(strict=True)
sys.path.append(str(_script_dir.parent))
from package_assembly_utils import ( # noqa: E402
copy_repo_relative_to_dir, gen_file_from_template, load_framework_info)
def parse_args():
    """Define and evaluate this script's command line; returns the parsed namespace."""
    arg_parser = argparse.ArgumentParser(description="""
Assembles the files for the C/C++ pod package in a staging directory.
This directory can be validated (e.g., with `pod lib lint`) and then zipped to create a package for release.
""")

    arg_parser.add_argument("--staging-dir", type=pathlib.Path,
                            default=pathlib.Path("./onnxruntime-mobile-c-staging"),
                            help="Path to the staging directory for the C/C++ pod files.")
    arg_parser.add_argument("--pod-version", required=True,
                            help="C/C++ pod version.")
    arg_parser.add_argument("--framework-info-file", type=pathlib.Path, required=True,
                            help="Path to the framework_info.json file containing additional values for the podspec. "
                                 "This file should be generated by CMake in the build directory.")
    arg_parser.add_argument("--framework-dir", type=pathlib.Path, required=True,
                            help="Path to the onnxruntime.framework directory to include in the pod.")

    return arg_parser.parse_args()
def main():
    """Stage the C/C++ pod: copy the framework directory and LICENSE into
    the staging directory, then render the podspec from its template."""
    args = parse_args()

    framework_info = load_framework_info(args.framework_info_file.resolve())
    staging_dir = args.staging_dir.resolve()

    print(f"Assembling files in staging directory: {staging_dir}")
    if staging_dir.exists():
        print("Warning: staging directory already exists", file=sys.stderr)

    # Copy the pod payload: the framework bundle plus the repo LICENSE file.
    framework_dir = args.framework_dir.resolve()
    shutil.copytree(framework_dir, staging_dir / framework_dir.name, dirs_exist_ok=True)
    copy_repo_relative_to_dir(["LICENSE"], staging_dir)

    # Render the podspec template with values taken from the CMake-generated
    # framework info file.
    substitutions = {
        "VERSION": args.pod_version,
        "IOS_DEPLOYMENT_TARGET": framework_info["IOS_DEPLOYMENT_TARGET"],
        "WEAK_FRAMEWORK": framework_info["WEAK_FRAMEWORK"],
        "LICENSE_FILE": '"LICENSE"',
    }
    template_path = _script_dir / "onnxruntime-mobile-c.podspec.template"
    podspec_path = staging_dir / "onnxruntime-mobile-c.podspec"
    gen_file_from_template(template_path, podspec_path, substitutions)

    return 0
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
] | |
dd97094e0e53418b16229ca0ca1a5efacd5e520f | 1b53325f6976bd2697f1d9678054b8a1e5dd059c | /update/without_expansion/2.run_calculate_concept_map.py | d0f902e4761716435b798ad4bda40a5255298bc5 | [
"MIT"
] | permissive | vsoch/semantic-image-comparison | d34150b4fed36d55f934e727297ee188951e3ed9 | ab029ad124fc6d6e7ae840c24a8e9471d8737525 | refs/heads/master | 2020-04-06T07:04:21.726094 | 2016-08-13T23:13:10 | 2016-08-13T23:13:10 | 48,921,431 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | #!/usr/bin/python
from glob import glob
import sys
import pandas
import os
# Classification framework
# for image1 in all images:
# for image2 in allimages:
# if image1 != image2:
# hold out image 1 and image 2, generate regression parameter matrix using other images
# generate predicted image for image 1 [PR1]
# generate predicted image for image 2 [PR2]
# classify image 1 as fitting best to PR1 or PR2
# classify image 2 as fitting best to PR1 or PR2
base = sys.argv[1]
update = "%s/update" %base
output_folder = "%s/classification" %update # any kind of tsv/result file
results = "%s/results" %update # any kind of tsv/result file
for x in [output_folder,results]:
if not os.path.exists(x):
os.mkdir(x)
# Images by Concepts data frame (NOT including all levels of ontology)
labels_tsv = "%s/concepts_binary_df.tsv" %update
image_lookup = "%s/image_nii_lookup.pkl" %update
df = pandas.read_csv(labels_tsv,sep="\t",index_col=0)
for image1_holdout in df.index.tolist():
print "Parsing %s" %(image1_holdout)
for image2_holdout in df.index.tolist():
if (image1_holdout != image2_holdout) and (image1_holdout < image2_holdout):
output_file = "%s/%s_%s_predict.pkl" %(output_folder,image1_holdout,image2_holdout)
if not os.path.exists(output_file):
job_id = "%s_%s" %(image1_holdout,image2_holdout)
filey = ".job/class_%s.job" %(job_id)
filey = open(filey,"w")
filey.writelines("#!/bin/bash\n")
filey.writelines("#SBATCH --job-name=%s\n" %(job_id))
filey.writelines("#SBATCH --output=.out/%s.out\n" %(job_id))
filey.writelines("#SBATCH --error=.out/%s.err\n" %(job_id))
filey.writelines("#SBATCH --time=2-00:00\n")
filey.writelines("#SBATCH --mem=32000\n")
filey.writelines("python 2.calculate_concept_map.py %s %s %s %s %s" %(image1_holdout, image2_holdout, output_file, labels_tsv, image_lookup))
filey.close()
os.system("sbatch -p russpold --qos russpold " + ".job/class_%s.job" %(job_id))
| [
"[email protected]"
] | |
f44574379435b1f2cd4ce38956cd022587c8a169 | f64fde1c4ae338987b76c10c1029468143f1d83a | /Test_programs/stacking_arm/main.py | 86a75d8333a3fe74d564dc64820892d75fccba01 | [] | no_license | abhijithneilabraham/Project-ANTON | 56a21941042034c9c2b407e25d4e75925a158e71 | 03478d9c9a537c2507a06e3c022a1092587cdc06 | refs/heads/master | 2023-04-01T21:01:14.568164 | 2020-05-01T14:19:24 | 2020-05-01T14:19:24 | 203,203,760 | 2 | 0 | null | 2023-03-24T22:42:40 | 2019-08-19T15:52:11 | Python | UTF-8 | Python | false | false | 1,285 | py | """
Make it more robust.
Stop episode once the finger stop at the final position for 50 steps.
Feature & reward engineering.
"""
from env import ArmEnv
from rl import DDPG
MAX_EPISODES = 900
MAX_EP_STEPS = 200
ON_TRAIN = False
# set env
env = ArmEnv()
s_dim = env.state_dim
a_dim = env.action_dim
a_bound = env.action_bound
rl = DDPG(a_dim, s_dim, a_bound)
steps = []
print(s_dim)
def train():
# start training
for i in range(MAX_EPISODES):
s = env.reset()
ep_r = 0.
for j in range(MAX_EP_STEPS):
# env.render()
a = rl.choose_action(s)
s_, r, done = env.step(a)
rl.store_transition(s, a, r, s_)
ep_r += r
if rl.memory_full:
# start to learn once has fulfilled the memory
rl.learn()
s = s_
if done or j == MAX_EP_STEPS-1:
print('Ep: %i | %s | ep_r: %.1f | step: %i' % (i, '---' if not done else 'done', ep_r, j))
break
rl.save()
def eval():
rl.restore()
env.render()
env.viewer.set_vsync(True)
s = env.reset()
while True:
env.render()
a = rl.choose_action(s)
s, r, done = env.step(a)
#if ON_TRAIN:
# train()
#else:
# eval()
| [
"[email protected]"
] | |
96a2c8ceb28ab064438abaa8b14ad96c713bff9c | b1d921644161105c3fa12d51702565a22b3e0d1e | /typeidea/blog/migrations/0001_initial.py | 84095c3a37f3779d83ece9dee0a3985fb3718f2e | [] | no_license | FATE-0/blog | 01e74a1f105ea2fc1b27e69be376ce4270e32f13 | fca878f68f8dc67a4e8b75d9c8f109d6e820375d | refs/heads/master | 2020-06-19T10:17:35.152719 | 2019-07-19T11:17:26 | 2019-07-19T11:17:26 | 196,675,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,347 | py | # Generated by Django 2.2.3 on 2019-07-14 08:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除')], default=1, verbose_name='状态')),
('is_nav', models.BooleanField(default=False, verbose_name='是否为导航')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
],
options={
'verbose_name': '分类',
'verbose_name_plural': '分类',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10, verbose_name='名称')),
('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除')], default=1, verbose_name='状态')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
],
options={
'verbose_name': '标签',
'verbose_name_plural': '标签',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='标题')),
('desc', models.CharField(blank=True, max_length=1024, verbose_name='摘要')),
('content', models.TextField(help_text='正文必须为 MarkDown 格式', verbose_name='正文')),
('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除'), (2, '草稿')], default=1, verbose_name='状态')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='分类')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
('tag', models.ManyToManyField(to='blog.Tag', verbose_name='标签')),
],
options={
'verbose_name': '文章',
'verbose_name_plural': '文章',
'ordering': ['-id'],
},
),
]
| [
"[email protected]"
] | |
a264914ada26cf2cef65b45470569fb9c72b51bb | 01dc09fdf4a9203da336b893650235f16ff5380f | /Backtest/Historical_BackTest/Neat/tf_neat-trader-intraday/no_hidden_layer/Tech_Input/simple/genome_test.py | 91c0fbe7c5d8937396ad29d1897557fa3872d7e4 | [] | no_license | webclinic017/RayTrader_v3 | 2b15228881bf7a08e90682a2364905317c282f65 | 2ea39946a2654dbc3b05b41abcaf5a4a4082a1b6 | refs/heads/master | 2023-03-16T04:40:41.392465 | 2019-06-04T04:46:46 | 2019-06-04T04:46:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,544 | py | import glob
import multiprocessing
import trader_env
import trader_data
import visualize
import reporter
from statistics import mean
import numpy as np
import neat
import pickle
import matplotlib.pyplot as plt
file_name = "G:\\AI Trading\\Code\\RayTrader_v3\\HistoricalData\\Min_data\\ADANIPORTS-EQ.csv"
data = trader_data.csv_to_df(file_name)
train_data, test_data = trader_data.split_data(data)
env = trader_env.Weighted_Unrealized_BS_Env(train_data)
max_env_steps = len(env.data) - env.t - 1
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
'config.cfg')
def eval_genome(genome, config):
global env, max_env_steps
ob = env.reset()
net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)
current_max_fitness = 0
fitness_current = 0
counter = 0
step = 0
step_max = max_env_steps
done = False
while not done:
# inputs = trader_data.get_inputs(signals, step)
nnOutput = net.activate(ob)
ob, rew, done, _ = env.step(np.argmax(nnOutput))
# print("id",genome_id,"Step:",step,"act:",np.argmax(nnOutput),"reward:",rew)
fitness_current += rew
step += 1
if fitness_current > current_max_fitness:
current_max_fitness = fitness_current
counter = 0
else:
counter += 1
if step >= step_max:
done = True
if done or env.amt<=0:
done = True
print("Genome id#: ", genome.key)
message = "Fitness :{} Max Fitness :{} Avg Daily Profit :{} %".format(fitness_current,
current_max_fitness,
round(mean(env.daily_profit_per), 3))
print("Initial Value: ",2000)
print("Final Value: ",env.amt)
print("Days: ",len(env.daily_profit_per))
print(message)
plt.title(genome.key)
plt.plot(env.daily_profit_per)
plt.show()
# logger.info(message)
genome.fitness = fitness_current
def run_tests(genome):
global env, max_env_steps, config
env = trader_env.Weighted_Unrealized_BS_Env(train_data)
max_env_steps = len(env.data) - env.t - 1
eval_genome(genome,config)
env = trader_env.Weighted_Unrealized_BS_Env(test_data)
max_env_steps = len(env.data) - env.t - 1
eval_genome(genome,config)
def run_files(files_set):
for genomeFile in files_set:
genome = pickle.load(open(genomeFile, 'rb'))
run_tests(genome)
print("#"*50)
def chunks(seq, num):
    """Split ``seq`` into ``num`` roughly equal consecutive slices.

    Slice boundaries come from a floating-point cursor advanced by
    len(seq)/num, so sizes differ by at most one element.
    """
    step = len(seq) / float(num)
    pieces = []
    cursor = 0.0
    while cursor < len(seq):
        pieces.append(seq[int(cursor):int(cursor + step)])
        cursor += step
    return pieces
# Load all the genomes
files = glob.glob(".\\genomes\\*.pkl")
n_processes = 3
threads = []
if __name__ == "__main__":
# divide the file-list
chunks_list = chunks(files, n_processes)
for i in range(n_processes):
threads.append(multiprocessing.Process(target=run_files, args=(chunks_list[i],)))
# start all threads
for t in threads:
t.start()
# Join all threads
for t in threads:
t.join()
#
# if __name__ == "__main__":
# genomeFile = '.\\genomes\\594.pkl'
# genome = pickle.load(open(genomeFile, 'rb'))
# run_tests(genome)
| [
"[email protected]"
] | |
487890ec6dfa248593a93530920bc2c0b559b453 | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/django/contrib/messages/storage/base.py | fd5d0c24aa8037c6beb35ed14e85fda6851aa798 | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 5,643 | py | from django.conf import settings
from django.contrib.messages import constants, utils
LEVEL_TAGS = utils.get_level_tags()
class Message:
    """
    Represent an actual message that can be stored in any of the supported
    storage classes (typically session- or cookie-based) and rendered in a view
    or template.
    """
    def __init__(self, level, message, extra_tags=None):
        self.level = int(level)
        self.message = message
        self.extra_tags = extra_tags
    def _prepare(self):
        """
        Prepare the message for serialization by forcing the ``message``
        and ``extra_tags`` to str in case they are lazy translations.
        """
        self.message = str(self.message)
        self.extra_tags = str(self.extra_tags) if self.extra_tags is not None else None
    def __eq__(self, other):
        # BUG FIX: return NotImplemented (not False) for non-Message
        # operands so Python can try the reflected comparison.
        if not isinstance(other, Message):
            return NotImplemented
        return self.level == other.level and self.message == other.message
    def __str__(self):
        return str(self.message)
    @property
    def tags(self):
        # Combine any custom extra_tags with the level's tag, skipping empties.
        return ' '.join(tag for tag in [self.extra_tags, self.level_tag] if tag)
    @property
    def level_tag(self):
        # Map the numeric level to its display tag via module-level LEVEL_TAGS.
        return LEVEL_TAGS.get(self.level, '')
class BaseStorage:
    """
    This is the base backend for temporary message storage.
    This is not a complete class; to be a usable storage backend, it must be
    subclassed and the two methods ``_get`` and ``_store`` overridden.
    """
    def __init__(self, request, *args, **kwargs):
        # Storage is bound to a single HttpRequest.
        self.request = request
        # Messages added via add() during this request, awaiting storage.
        self._queued_messages = []
        # Becomes True once the messages have been iterated (displayed).
        self.used = False
        # Becomes True once add() has queued at least one message.
        self.added_new = False
        super().__init__(*args, **kwargs)
    def __len__(self):
        return len(self._loaded_messages) + len(self._queued_messages)
    def __iter__(self):
        # Iterating marks the storage "used" so update() knows the messages
        # were consumed; queued messages are merged into the loaded list.
        self.used = True
        if self._queued_messages:
            self._loaded_messages.extend(self._queued_messages)
            self._queued_messages = []
        return iter(self._loaded_messages)
    def __contains__(self, item):
        return item in self._loaded_messages or item in self._queued_messages
    @property
    def _loaded_messages(self):
        """
        Return a list of loaded messages, retrieving them first if they have
        not been loaded yet.
        """
        if not hasattr(self, '_loaded_data'):
            # The all_retrieved flag from _get() is deliberately unused here;
            # subclasses consult it when deciding what to re-store.
            messages, all_retrieved = self._get()
            self._loaded_data = messages or []
        return self._loaded_data
    def _get(self, *args, **kwargs):
        """
        Retrieve a list of stored messages. Return a tuple of the messages
        and a flag indicating whether or not all the messages originally
        intended to be stored in this storage were, in fact, stored and
        retrieved; e.g., ``(messages, all_retrieved)``.
        **This method must be implemented by a subclass.**
        If it is possible to tell if the backend was not used (as opposed to
        just containing no messages) then ``None`` should be returned in
        place of ``messages``.
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')
    def _store(self, messages, response, *args, **kwargs):
        """
        Store a list of messages and return a list of any messages which could
        not be stored.
        One type of object must be able to be stored, ``Message``.
        **This method must be implemented by a subclass.**
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')
    def _prepare_messages(self, messages):
        """
        Prepare a list of messages for storage.
        """
        # Forces lazy translations to plain str; see Message._prepare().
        for message in messages:
            message._prepare()
    def update(self, response):
        """
        Store all unread messages.
        If the backend has yet to be iterated, store previously stored messages
        again. Otherwise, only store messages added after the last iteration.
        """
        self._prepare_messages(self._queued_messages)
        if self.used:
            # Messages were displayed; persist only the newly queued ones.
            return self._store(self._queued_messages, response)
        elif self.added_new:
            # Nothing was displayed; carry everything over to the response.
            messages = self._loaded_messages + self._queued_messages
            return self._store(messages, response)
    def add(self, level, message, extra_tags=''):
        """
        Queue a message to be stored.
        The message is only queued if it contained something and its level is
        not less than the recording level (``self.level``).
        """
        if not message:
            return
        # Check that the message level is not less than the recording level.
        level = int(level)
        if level < self.level:
            return
        # Add the message.
        self.added_new = True
        message = Message(level, message, extra_tags=extra_tags)
        self._queued_messages.append(message)
    def _get_level(self):
        """
        Return the minimum recorded level.
        The default level is the ``MESSAGE_LEVEL`` setting. If this is
        not found, the ``INFO`` level is used.
        """
        if not hasattr(self, '_level'):
            self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
        return self._level
    def _set_level(self, value=None):
        """
        Set a custom minimum recorded level.
        If set to ``None``, the default level will be used (see the
        ``_get_level`` method).
        """
        if value is None and hasattr(self, '_level'):
            del self._level
        else:
            self._level = int(value)
    # The deleter reuses _set_level: `del storage.level` calls it with the
    # default value=None, which resets the level to the setting's default.
    level = property(_get_level, _set_level, _set_level)
| [
"[email protected]"
] | |
5b6746bc96796294065d58ec98028daa3d44bbf9 | 2f5ab43956b947b836e8377370d786e5ee16e4b0 | /sklearn2code/sym/test/test_printers.py | d1f8d27e37ac139c656be81f1359268ce15271d4 | [
"MIT"
] | permissive | modusdatascience/sklearn2code | b175fb268fa2871c95f0e319f3cd35dd54561de9 | 3ab82d82aa89b18b18ff77a49d0a524f069d24b9 | refs/heads/master | 2022-09-11T06:16:37.604407 | 2022-08-24T04:43:59 | 2022-08-24T04:43:59 | 115,747,326 | 4 | 2 | MIT | 2018-05-01T00:11:51 | 2017-12-29T19:05:03 | Python | UTF-8 | Python | false | false | 874 | py | from sklearn2code.sym.expression import FiniteMap, Integer, false, true,\
IntegerVariable, RealPiecewise, RealNumber
from sklearn2code.sym.printers import JavascriptPrinter
from nose.tools import assert_equal
def test_javascript_finite_map():
    # Map the integer variable x: 0 -> false, 1 -> true, anything else -> null.
    mapping = FiniteMap({Integer(0): false, Integer(1): true}, IntegerVariable('x'))
    rendered = JavascriptPrinter()(mapping)
    assert_equal(rendered, '(x===0?false:(x===1?true:null))')
def test_javascript_piecewise():
    # Piecewise renders as nested JS ternaries with a null fall-through.
    pw = RealPiecewise((RealNumber(0), false), (RealNumber(1), true))
    rendered = JavascriptPrinter()(pw)
    assert_equal(rendered, '(false?0.0:(true?1.0:null))')
if __name__ == '__main__':
    import sys
    import nose
    # Run only the tests defined in this file through nose (-s: no capture,
    # -v: verbose). NOTE(review): nose is unmaintained; consider pytest.
    module_name = sys.modules[__name__].__file__
    result = nose.run(argv=[sys.argv[0],
                            module_name,
                            '-s', '-v'])
| [
"[email protected]"
] | |
1a01f5c2747cdd429c329c7250f34280b5f686d2 | 412b699e0f497ac03d6618fe349f4469646c6f2d | /env/lib/python3.8/site-packages/web3/_utils/threads.py | ba45d8775e0e35fd72ae6117133e9d50ea23bdc3 | [
"MIT"
] | permissive | EtienneBrJ/Portfolio | 7c70573f02a5779f9070d6d9df58d460828176e3 | 6b8d8cf9622eadef47bd10690c1bf1e7fd892bfd | refs/heads/main | 2023-09-03T15:03:43.698518 | 2021-11-04T01:02:33 | 2021-11-04T01:02:33 | 411,076,325 | 1 | 0 | MIT | 2021-10-31T13:43:09 | 2021-09-27T23:48:59 | HTML | UTF-8 | Python | false | false | 3,979 | py | """
A minimal implementation of the various gevent APIs used within this codebase.
"""
import threading
import time
from types import (
TracebackType,
)
from typing import (
Any,
Callable,
Generic,
Type,
)
from web3._utils.compat import (
Literal,
)
from web3.types import (
TReturn,
)
class Timeout(Exception):
    """
    A limited subset of the `gevent.Timeout` context manager.

    Typical use: ``with Timeout(5): ...`` and call :meth:`check`
    periodically inside the block; once the deadline passes, ``check``
    raises the configured exception (or this instance itself).
    """
    seconds = None
    exception = None
    begun_at = None
    is_running = None

    def __init__(
        self, seconds: float = None, exception: Type[BaseException] = None, *args: Any,
        **kwargs: Any
    ) -> None:
        # Extra positional/keyword arguments are accepted for signature
        # compatibility but intentionally ignored.
        self.seconds = seconds
        self.exception = exception

    def __enter__(self) -> 'Timeout':
        self.start()
        return self

    def __exit__(
        self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb: TracebackType
    ) -> Literal[False]:
        # Never suppress exceptions raised inside the ``with`` block.
        return False

    def __str__(self) -> str:
        return '' if self.seconds is None else f'{self.seconds} seconds'

    @property
    def expire_at(self) -> int:
        if self.seconds is None:
            raise ValueError("Timeouts with `seconds == None` do not have an expiration time")
        if self.begun_at is None:
            raise ValueError("Timeout has not been started")
        return self.begun_at + self.seconds

    def start(self) -> None:
        if self.is_running is not None:
            raise ValueError("Timeout has already been started")
        self.begun_at = time.time()
        self.is_running = True

    def check(self) -> None:
        if self.is_running is None:
            raise ValueError("Timeout has not been started")
        if self.is_running is False:
            raise ValueError("Timeout has already been cancelled")
        if self.seconds is None:
            return
        if time.time() <= self.expire_at:
            return
        # Deadline passed: mark as stopped, then raise the configured error.
        self.is_running = False
        if isinstance(self.exception, type):
            raise self.exception(str(self))
        if isinstance(self.exception, Exception):
            raise self.exception
        raise self

    def cancel(self) -> None:
        self.is_running = False

    def sleep(self, seconds: float) -> None:
        time.sleep(seconds)
        self.check()
class ThreadWithReturn(threading.Thread, Generic[TReturn]):
    """A ``Thread`` that remembers its target's return value.

    Call :meth:`get` (which joins first) to retrieve the result.
    """

    def __init__(
        self, target: Callable[..., TReturn] = None, args: Any = None, kwargs: Any = None
    ) -> None:
        super().__init__(target=target, args=args or tuple(), kwargs=kwargs or {})
        self.target = target
        self.args = args
        self.kwargs = kwargs

    def run(self) -> None:
        # Capture the result so get() can hand it back after the join.
        self._return = self.target(*self.args, **self.kwargs)

    def get(self, timeout: float = None) -> TReturn:
        self.join(timeout)
        missing = object()
        result = getattr(self, '_return', missing)
        if result is missing:
            raise RuntimeError("Something went wrong. No `_return` property was set")
        return result
class TimerClass(threading.Thread):
    """Invoke ``callback(*args)`` immediately and then once per
    ``interval`` seconds until :meth:`stop` is called.
    """

    def __init__(self, interval: int, callback: Callable[..., Any], *args: Any) -> None:
        threading.Thread.__init__(self)
        self.interval = interval
        self.callback = callback
        self.args = args
        self.terminate_event = threading.Event()

    def run(self) -> None:
        # Event.wait doubles as both the inter-call sleep and the
        # cancellation check, so stop() takes effect promptly.
        while not self.terminate_event.is_set():
            self.callback(*self.args)
            self.terminate_event.wait(self.interval)

    def stop(self) -> None:
        self.terminate_event.set()
def spawn(
    target: Callable[..., TReturn],
    *args: Any,
    thread_class: Type[ThreadWithReturn[TReturn]] = ThreadWithReturn,
    **kwargs: Any,
) -> ThreadWithReturn[TReturn]:
    """Run ``target(*args, **kwargs)`` on a new daemon thread.

    Returns the started thread; call ``.get()`` on it for the result.
    """
    worker = thread_class(target=target, args=args, kwargs=kwargs)
    worker.daemon = True
    worker.start()
    return worker
| [
"[email protected]"
] | |
d5439756a472a776f6e2de4f77152fbc8854b8cf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/280/97935/submittedfiles/testes.py | d88ecb76f5e989a6ee41f07dc266207edd3ddf88 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | """
valor=["X","O"]
symh=valor[0]
sympc=valor[1]
print(symh)
print(sympc)
line1=[" "," "," "]
line2=[" "," "," "]
line3=[" "," "," "]
print("|%s|%s|%s|" % (line1[0],line1[1],line1[2]) )
print("|%s|%s|%s|" % (line2[0],line2[1],line2[2]) )
print("|%s|%s|%s|" % (line3[0],line3[1],line3[2]) )
line1[2]=symh
print("|%s|%s|%s|" % (line1[0],line1[1],line1[2]) )
print("|%s|%s|%s|" % (line2[0],line2[1],line2[2]) )
print("|%s|%s|%s|" % (line3[0],line3[1],line3[2]) )
line2[1]=sympc
print("|%s|%s|%s|" % (line1[0],line1[1],line1[2]) )
print("|%s|%s|%s|" % (line2[0],line2[1],line2[2]) )
print("|%s|%s|%s|" % (line3[0],line3[1],line3[2]) )
line3[2]=symh
print("|%s|%s|%s|" % (line1[0],line1[1],line1[2]) )
print("|%s|%s|%s|" % (line2[0],line2[1],line2[2]) )
print("|%s|%s|%s|" % (line3[0],line3[1],line3[2]) )
"""
"""
x=int(input("Número de médias: "))
while x <= 1:
x=int(input("Número de médias: "))
notas=[]
for i in range (0,x,1):
notas.append(float(input("Insira a nota %d: " %(i+1))))
soma=sum(notas)
res=soma/x
print(res)
"""
"""
n=int(input("Insira n: "))
a=[]
for i in range (0,n,1):
a.append(int(input("Digite o termo %d do vetor a: " %(i+1))))
med=sum(a)/len(a)
somat=0
for i in range (0,len(a),1):
somat=somat + ((a[i]-med)**2)
desvpad=(((1/(n-1))*(somat))**0.5)
print(desvpad)
"""
# Read an n x n matrix, build its transpose and both diagonals, and count
# some row-sum coincidences.
import numpy as np
cont1 = 0  # rows whose sum equals the next row's sum
cont2 = 0  # transpose rows whose sum equals the next original row's sum
cont3 = 0  # main-diagonal entries equal to the next row's sum
dim = int(input("Dimensão n da matriz: "))
matriz = np.empty([dim, dim])
matriztrans = np.empty([dim, dim])
matrizdiag = np.empty([2, dim])
for i in range(dim):
    for j in range(dim):
        # BUG FIX: the prompt had %d placeholders but no operands, so the
        # user literally saw "%d"; supply the 1-based row/column indices.
        matriz[i][j] = float(input("Digite o nº da linha %d na coluna %d: " % (i + 1, j + 1)))
# transposta
for i in range(dim):
    for j in range(dim):
        matriztrans[i][j] = matriz[j][i]
# diagonais: row 0 = main diagonal, row 1 = anti-diagonal
for i in range(dim):
    matrizdiag[0][i] = matriz[i][i]
# BUG FIX: the original assigned a scalar to the whole row 1 on every
# iteration (matrizdiag[1] = matriz[i][j]), clobbering previous values;
# store one anti-diagonal element per column instead.
for i in range(dim):
    matrizdiag[1][i] = matriz[i][dim - 1 - i]
print(matriz)
print(matriztrans)
print(matrizdiag)
for i in range(dim - 1):
    if sum(matriz[i]) == sum(matriz[i + 1]):
        cont1 = cont1 + 1
for i in range(dim - 1):
    # NOTE(review): this compares transpose rows against *original* matrix
    # rows; confirm whether matriztrans[i + 1] was intended instead.
    if sum(matriztrans[i]) == sum(matriz[i + 1]):
        cont2 = cont2 + 1
for i in range(dim - 1):
    if matriz[i][i] == sum(matriz[i + 1]):
        cont3 = cont3 + 1
"[email protected]"
] | |
3b86e81c1aefa746ea0b2327c9bc1e620689dd0a | 7a013424c82b71bc82aa312e0165a1af4170ac23 | /ABC/ABC173/C.py | c0f86d46455b822b965fac48b703f8bf73750487 | [] | no_license | kikugawa-shoma/Atcoder | fe3405e36dd3e4e25127b6110d6009db507e7095 | 7299116b7beb84815fe34d41f640a2ad1e74ba29 | refs/heads/master | 2020-12-21T19:10:12.471507 | 2020-10-10T16:38:18 | 2020-10-10T16:38:18 | 236,531,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | import copy
H,W,K = map(int,input().split())  # grid height, width, and target count K
C = [list(input()) for _ in range(H)]  # grid rows as character lists ('#' marks a cell)
M = [[0]*W for _ in range(H)]  # numeric grid: 1 where C has '#', else 0
for i in range(H):
    for j in range(W):
        if C[i][j] == "#":
            M[i][j] = 1
def bit_01(keta):
    """Return every binary string of length *keta*, in ascending numeric order."""
    template = "{:0" + str(keta) + "b}"
    patterns = []
    for value in range(2 ** keta):
        patterns.append(template.format(value))
    return patterns
vert = bit_01(H)  # all row-selection bitmask strings ('1' = zero out that row)
hori = bit_01(W)  # all column-selection bitmask strings ('1' = zero out that column)
def check(v, h, M):
    """Return True if zeroing the rows marked in *v* and the columns
    marked in *h* leaves exactly K ones in the grid *M*.

    Works on a deep copy; *M* itself is never modified.
    """
    grid = copy.deepcopy(M)
    for row, flag in enumerate(v):
        if flag == "1":
            for col in range(W):
                grid[row][col] = 0
    for col, flag in enumerate(h):
        if flag == "1":
            for row in range(H):
                grid[row][col] = 0
    remaining = sum(sum(line) for line in grid)
    return remaining == K
ans = 0
# Count the (row-mask, column-mask) pairs that leave exactly K ones.
for vp in vert:
    for hp in hori:
        if check(vp,hp,M):
            ans += 1
print(ans)
| [
"[email protected]"
] | |
9176d3e53da70f0692fbab648cb4c76f58216f6d | 059c4606fd93b70c244a0017cc1727d1b951e75a | /5-packages/http-examples/httpie-notes/httpie/context.py | c0840c9d051252a44b25937acfd607e94db2b7e7 | [
"BSD-3-Clause"
] | permissive | andyguwc/python-resources | 1f6850b1fde243912644530ee8985ae09773c68e | d8ab7e54d287a697e4763a36b10136af461ec820 | refs/heads/master | 2021-06-24T13:30:25.196129 | 2021-03-02T03:11:49 | 2021-03-02T03:11:49 | 210,958,803 | 1 | 1 | null | 2019-10-25T03:12:31 | 2019-09-25T23:29:29 | Python | UTF-8 | Python | false | false | 3,005 | py | import os
import sys
from pathlib import Path
from typing import Union, IO, Optional
try:
import curses
except ImportError:
curses = None # Compiled w/o curses
from httpie.compat import is_windows
from httpie.config import DEFAULT_CONFIG_DIR, Config, ConfigFileError
from httpie.utils import repr_dict
# use this to manage all things environment related
class Environment:
    """
    Information about the execution context
    (standard streams, config directory, etc).
    By default, it represents the actual environment.
    All of the attributes can be overwritten though, which
    is used by the test suite to simulate various scenarios.
    """
    is_windows: bool = is_windows
    config_dir: Path = DEFAULT_CONFIG_DIR
    stdin: Optional[IO] = sys.stdin
    stdin_isatty: bool = stdin.isatty() if stdin else False
    stdin_encoding: str = None
    stdout: IO = sys.stdout
    stdout_isatty: bool = stdout.isatty()
    stdout_encoding: str = None
    stderr: IO = sys.stderr
    stderr_isatty: bool = stderr.isatty()
    colors = 256
    program_name: str = 'http'

    def __init__(self, **kwargs):
        """
        Use keyword arguments to overwrite
        any of the class attributes for this instance.
        """
        # Every override must correspond to an existing class attribute.
        assert all(hasattr(type(self), attr) for attr in kwargs.keys())
        self.__dict__.update(**kwargs)
        # Keyword arguments > stream.encoding > default utf8
        if self.stdin and self.stdin_encoding is None:
            self.stdin_encoding = getattr(
                self.stdin, 'encoding', None) or 'utf8'
        if self.stdout_encoding is None:
            actual_stdout = self.stdout
            self.stdout_encoding = getattr(
                actual_stdout, 'encoding', None) or 'utf8'

    def __str__(self):
        # Merge class-level defaults with instance overrides, then render
        # the non-private attributes (plus the config) for debugging.
        defaults = dict(type(self).__dict__)
        actual = dict(defaults)
        actual.update(self.__dict__)
        actual['config'] = self.config
        return repr_dict({
            key: value
            for key, value in actual.items()
            if not key.startswith('_')
        })

    def __repr__(self):
        return f'<{type(self).__name__} {self}>'

    _config = None  # lazily-created Config cache; see the property below.

    # Supports loading config from the config file directory
    # https://httpie.org/doc#config-file-directory
    @property
    def config(self) -> Config:
        """Return the Config, creating and loading it on first access."""
        config = self._config
        if not config:
            self._config = config = Config(directory=self.config_dir)
            if not config.is_new():
                try:
                    config.load()
                except ConfigFileError as e:
                    self.log_error(e, level='warning')
        # BUG FIX: the return statement was missing, so this property
        # always evaluated to None.
        return config

    def log_error(self, msg, level='error'):
        assert level in ['error', 'warning']
        self.stderr.write(f'\n{self.program_name}: {level}: {msg}\n\n')
| [
"[email protected]"
] | |
05eacae54547837444451aba6a9ab0c685add15e | 03198f075072bfb9d5c5afab2fef99d3ec5f37db | /source/api_v2/serializers/advert.py | 8c9cf5e5ce4d0f747676fb2b5908d2bbc2e61240 | [] | no_license | Azer-Denker/Ex_12 | 2c402dffddbf726bfaab61f5022ea0cf6b6b3562 | 97d4eda2d621163c6e12ea388569b50157d09fd5 | refs/heads/main | 2023-07-14T19:05:39.763400 | 2021-08-21T13:30:31 | 2021-08-21T13:30:31 | 398,558,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | from rest_framework import serializers
from webapp.models import Advert
class AdvertSerializer(serializers.ModelSerializer):
    """Serializer for Advert objects; ``author`` and ``id`` are read-only."""
    class Meta:
        model = Advert
        fields = ('id', 'title', 'text', 'author', 'created_at')
        read_only_fields = ('author', 'id')

    def create(self, validated_data):
        """Create and return a new Advert from the validated payload."""
        return Advert.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Copy each validated field onto *instance*, save, and return it."""
        for field, value in validated_data.items():
            setattr(instance, field, value)
        instance.save()
        return instance

    def delete(self, instance):
        """Delete *instance* and return the primary key it had.

        BUG FIX: capture the pk *before* deleting — Django sets
        ``instance.pk`` to ``None`` after a successful delete, so the
        original ``return instance.pk`` always returned ``None``.
        """
        pk = instance.pk
        instance.delete()
        return pk
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.