| Column | Type | Range / values |
| --- | --- | --- |
| blob_id | string | length 40-40 |
| directory_id | string | length 40-40 |
| path | string | length 3-288 |
| content_id | string | length 40-40 |
| detected_licenses | list | length 0-112 |
| license_type | string | 2 classes |
| repo_name | string | length 5-115 |
| snapshot_id | string | length 40-40 |
| revision_id | string | length 40-40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1-1 |
| author_id | string | length 1-132 |
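Each row that follows instantiates this schema, with the source file itself in the `content` cell. As a minimal sketch of how such a split could be inspected, assuming a Hugging Face `datasets`-style export (the dataset identifier below is a placeholder, not the real one):

```python
from datasets import load_dataset

# Hypothetical identifier; substitute the dataset's actual path.
ds = load_dataset("org/python-code-dump", split="train", streaming=True)

row = next(iter(ds))
# Provenance fields travel with every file ...
print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
# ... and the file's source sits in the `content` column.
print(row["content"][:200])
```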
977aa0a76af026f61c509844bb37c9a7e0e2603a | eb7c15f59f0863b457b272849930dce4ef92e58c | /znc/run | 09e03a3c11edded93c1dd153409c21e45d5db281 | [] | no_license | dozymoe/runit-init-scripts | 40492bc11b9a7f5f974088e7b5e870d97f54354a | ddb8915c6f2da8a5c2acdb5e09f33bc6c027ccdb | refs/heads/master | 2021-01-22T07:06:29.736090 | 2014-06-19T01:32:08 | 2014-06-19T01:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 |
#!/usr/bin/env python
import logging
import os
import sys
from time import sleep
# import external library
sys.path.append('/var/service')
from runit_helper import (
MAXIMUM_CRASHES_DELAY,
check_crash_quota,
check_dependencies,
get_logger,
run,
)
service_name = 'znc'
log = get_logger(service_name, logging.INFO)
check_dependencies(service_name, log)
if check_crash_quota(service_name):
sleep(MAXIMUM_CRASHES_DELAY)
exit(0)
log.info('starting..')
run('/usr/bin/znc', ['--foreground'])
| [
"[email protected]"
] | ||
4b64a051e30b954139e58857c0e08c141725d3be | 8f1d6f17d3bdad867518b7b0a164adfe6aeeed95 | /recognition/vpl/backbones/iresnet.py | c6d3b9c240c24687d432197f976ee01fbf423216 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | xwyangjshb/insightface | 2c7f030a5d1f5a24b18967bd0d775ee33933d37f | ae233babaf7614ef4ef28dac0171205835d78d64 | refs/heads/master | 2022-09-29T07:49:22.944700 | 2022-09-22T11:36:12 | 2022-09-22T11:36:12 | 221,020,460 | 1 | 0 | MIT | 2019-11-11T16:16:56 | 2019-11-11T16:16:55 | null | UTF-8 | Python | false | false | 7,149 | py |
import torch
from torch import nn
__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class IBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
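        # Pre-activation residual unit: BN -> 3x3 conv -> BN -> PReLU -> 3x3 conv -> BN,
        # with the (optionally downsampled) identity added back afterwards and no
        # activation applied after the sum.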
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
self.prelu = nn.PReLU(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
class IResNet(nn.Module):
fc_scale = 7 * 7
def __init__(self,
block, layers, dropout=0, num_features=512, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
super(IResNet, self).__init__()
self.fp16 = fp16
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
self.prelu = nn.PReLU(self.inplanes)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block,
512,
layers[3],
stride=2,
dilate=replace_stride_with_dilation[2])
self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
self.dropout = nn.Dropout(p=dropout, inplace=True)
self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
self.features = nn.BatchNorm1d(num_features, eps=1e-05)
nn.init.constant_(self.features.weight, 1.0)
self.features.weight.requires_grad = False
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, IBasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation))
return nn.Sequential(*layers)
def forward(self, x):
with torch.cuda.amp.autocast(self.fp16):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x.float() if self.fp16 else x)
x = self.features(x)
return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet18(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
progress, **kwargs)
def iresnet34(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
progress, **kwargs)
def iresnet50(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
progress, **kwargs)
def iresnet100(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
progress, **kwargs)
def iresnet200(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,
progress, **kwargs)
| [
"[email protected]"
] | |
b65b8f7c48e21d63843b88ce2832a2d666bf33d7 | 32f1d0e9c2fbce7f4682b9f79cae5f3df0480de0 | /brevets/flask_brevets.py | ff59123f5a991747db42de10588f90ef1a270ae0 | [
"Artistic-2.0"
] | permissive | UO-CIS-322/proj4-brevets | b0546b3e47db78c74b4c35b52c5527c811eb8ad0 | a1600206886d324eaa3975f561ae6c7fff601b82 | refs/heads/master | 2021-01-21T21:32:21.088892 | 2017-10-13T21:29:38 | 2017-10-13T21:29:38 | 43,849,637 | 0 | 75 | null | 2017-10-22T04:51:19 | 2015-10-07T23:01:01 | Python | UTF-8 | Python | false | false | 1,854 | py |
"""
Replacement for RUSA ACP brevet time calculator
(see https://rusa.org/octime_acp.html)
"""
import flask
from flask import request
import arrow # Replacement for datetime, based on moment.js
import acp_times # Brevet time calculations
import config
import logging
###
# Globals
###
app = flask.Flask(__name__)
CONFIG = config.configuration()
app.secret_key = CONFIG.SECRET_KEY
###
# Pages
###
@app.route("/")
@app.route("/index")
def index():
app.logger.debug("Main page entry")
return flask.render_template('calc.html')
@app.errorhandler(404)
def page_not_found(error):
app.logger.debug("Page not found")
flask.session['linkback'] = flask.url_for("index")
return flask.render_template('404.html'), 404
###############
#
# AJAX request handlers
# These return JSON, rather than rendering pages.
#
###############
@app.route("/_calc_times")
def _calc_times():
"""
Calculates open/close times from miles, using rules
described at https://rusa.org/octime_alg.html.
Expects one URL-encoded argument, the number of miles.
"""
app.logger.debug("Got a JSON request")
km = request.args.get('km', 999, type=float)
app.logger.debug("km={}".format(km))
app.logger.debug("request.args: {}".format(request.args))
# FIXME: These probably aren't the right open and close times
# and brevets may be longer than 200km
    open_time = acp_times.open_time(km, 200, arrow.now().isoformat())
    close_time = acp_times.close_time(km, 200, arrow.now().isoformat())
result = {"open": open_time, "close": close_time}
return flask.jsonify(result=result)
#############
app.debug = CONFIG.DEBUG
if app.debug:
app.logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
print("Opening for global access on port {}".format(CONFIG.PORT))
app.run(port=CONFIG.PORT, host="0.0.0.0")
| [
"[email protected]"
] | |
859f53a675da269d458e7153e908f2527223ac15 | bf534da18426b49dbee0a0b1870f5f3a85922855 | /ex023.py | 81005cc6e332246276e30c09bd341672794200b7 | [] | no_license | kcpedrosa/Python-exercises | 0d20a72e7e68d9fc9714e3aabf4850fdbeb7d1f8 | ae35dfad869ceb3aac186fce5161cef8a77a7579 | refs/heads/master | 2021-05-20T08:46:29.318242 | 2020-04-01T15:44:36 | 2020-04-01T15:44:36 | 252,205,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py |
num = int(input('Type any number: '))
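# Integer-divide by the place value (1, 10, 100, 1000), then take mod 10,
# to isolate each decimal digit of the input.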
u = num // 1 % 10
d = num // 10 % 10
c = num // 100 % 10
m = num // 1000 % 10
print('Analyzing the number {}'.format(num))
print('The units digit is {}'.format(u))
print('The tens digit is {}'.format(d))
print('The hundreds digit is {}'.format(c))
print('The thousands digit is {}'.format(m))
| [
"[email protected]"
] | |
994c0795da16cdc04ade8acbce51229b95fa4e8e | 5527d3854ad0840fb4a0a9893447535cd5e6ad0f | /python/ThirteenTeV/QstarToQW_M_1200_TuneCUETP8M1_13TeV_pythia8_cfi.py | 58d9d33c62bab8fd0ee915374feb779697103556 | [] | no_license | danbarto/genproductionsSummer16 | ecf2309c1627b4db3e4a1b8785ca612d9a59426f | 655ef31aa5f05d0117aeef82d107f07a1fd5d822 | refs/heads/master | 2020-03-26T23:12:37.115369 | 2018-08-21T14:23:30 | 2018-08-21T14:23:30 | 145,520,233 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py |
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(65.84),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExcitedFermion:dg2dStar = on',
'ExcitedFermion:ug2uStar = on',
'ExcitedFermion:Lambda = 1200',
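            # 4000001 / 4000002 are the PDG ids of the excited d* and u* quarks;
            # both are pinned at m0 = 1200 GeV and forced via onIfMatch to decay
            # to a quark plus a W boson (d* -> u W, u* -> d W).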
'4000001:m0 = 1200',
'4000001:onMode = off',
'4000001:onIfMatch = 2 24',
'4000002:m0 = 1200',
'4000002:onMode = off',
'4000002:onIfMatch = 1 24',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
ddc32b1926560d046349ee35ff5707643abd8afe | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /140_gui/pyqt_pyside/_exercises/_templates/temp/Mastering GUI Programming with Python/Chapter 3 Handling Events with Signals and Slots/signal_slots_demo.py | f79d2febefd50d50434b21a86eb7d099cee6be09 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,488 | py |
import sys
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc


class MainWindow(qtw.QWidget):

    def __init__(self):
        super().__init__()
        self.setLayout(qtw.QVBoxLayout())

        # connecting a signal to a slot
        quitbutton = qtw.QPushButton('Quit')
        quitbutton.clicked.connect(self.close)
        self.layout().addWidget(quitbutton)

        # connecting a signal with data to a slot that receives data
        entry1 = qtw.QLineEdit()
        entry2 = qtw.QLineEdit()
        self.layout().addWidget(entry1)
        self.layout().addWidget(entry2)
        entry1.textChanged.connect(entry2.setText)

        # connecting a signal to a python callable
        entry2.textChanged.connect(print)

        # Connecting a signal to another signal
        entry1.editingFinished.connect(lambda: print('editing finished'))
        entry2.returnPressed.connect(entry1.editingFinished)

        # This call will fail, because the signals have different argument types
        #self.entry1.textChanged.connect(self.quitbutton.clicked)

        # This won't work, because the signal doesn't send enough args
        badbutton = qtw.QPushButton('Bad')
        self.layout().addWidget(badbutton)
        badbutton.clicked.connect(self.needs_args)

        # This will work, even though the signal sends extra args
        goodbutton = qtw.QPushButton('Good')
        self.layout().addWidget(goodbutton)
        goodbutton.clicked.connect(self.no_args)

        self.show()

    def needs_args(self, arg1, arg2, arg3):
        pass

    def no_args(self):
        print('I need no arguments')

if __name__ == '__main__':
    app = qtw.QApplication(sys.argv)
    # it's required to save a reference to MainWindow.
    # if it goes out of scope, it will be destroyed.
    mw = MainWindow()
    sys.exit(app.exec())
| [
"[email protected]"
] | |
388a6eb4b8b486a5c9c706692097b3b4c38187c7 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/pandas/_config/display.py | 57b7af184346cd2f68442d22a2bd7a489047ecad | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py |
version https://git-lfs.github.com/spec/v1
oid sha256:f6ba130797f4f1ce2395562efa48f788ebd3a352e26f7c79209f476a3d300866
size 1756
| [
"[email protected]"
] | |
4592366353bb1a72dfd875e0dfdbd622612baa2b | ef84f06e845d5c42aae2faee84c263f9eb42d92d | /keen/web/views/api/user.py | 46bd2b750294c76a1ca60d1ba6b84a5b3139654b | [] | no_license | beforebeta/keensmb | 0921473df4e92e366695cc03c9fdef96a3be4075 | 5408a42a16c83558229f62c88eec011231a0a797 | refs/heads/master | 2016-09-16T00:37:08.907191 | 2014-03-24T15:31:11 | 2014-03-24T15:31:11 | 14,530,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,726 | py | import logging
from uuid import uuid1
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.db import DatabaseError
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import ensure_csrf_cookie
from rest_framework.decorators import api_view
from rest_framework.response import Response
from keen.core.models import ClientUser
from keen.web.models import TrialRequest
from keen.web.forms import TrialRequestForm
from keen.web.serializers import ClientSerializer
from keen.tasks import send_email, mailchimp_subscribe
from tracking.models import Visitor
logger = logging.getLogger(__name__)
@ensure_csrf_cookie
@api_view(['POST'])
def login_view(request):
try:
email = request.DATA['email']
password = request.DATA['password']
except KeyError:
logger.warn('Request is missing email and/or password parameters: %r' % request.DATA)
return HttpResponseBadRequest('Missing authentication information')
user = authenticate(username=email, password=password)
logger.debug('Authenticate %r' % locals())
if user:
login(request, user)
try:
request.session['client_slug'] = ClientUser.objects.get(
user=user).client.slug
except ClientUser.DoesNotExist:
request.session['client_slug'] = None
request.session.save()
else:
request.session.save()
return Response({'success': 'Thank you for signing-in!'})
    return Response({'error': 'Invalid e-mail/password combination'})
@ensure_csrf_cookie
@api_view(['GET'])
def logout_view(request):
request.session.pop('client_slug', None)
logout(request)
return HttpResponseRedirect(reverse('home'))
@ensure_csrf_cookie
@api_view(['POST'])
def request_free_trial(request):
form = TrialRequestForm(request.DATA)
if form.is_valid():
trial_request = TrialRequest(**form.cleaned_data)
trial_request.source = request.session.get('landing_page')
if 'visitor' in request.session:
try:
trial_request.visitor = Visitor.objects.get(
pk=request.session['visitor'])
except Visitor.DoesNotExist:
logger.error('Visitor does not exist')
try:
trial_request.save()
except DatabaseError:
logger.exception('Failed to save free trial request')
# FIXME: should we return an error?
# for now lets pretend all went well
email = trial_request.email or 'ignore+{0}@keensmb.com'.format(uuid1().hex)
mailchimp_subscribe.delay(
'aba1a09617',
email,
{
'EMAIL': email,
'NAME': trial_request.name or '',
'BIZNAME': trial_request.business or '',
'NUMBER': trial_request.phone or '',
'REFERRAL': trial_request.question or '',
'QUESTIONS': trial_request.comments or '',
},
double_optin=False,
update_existing=True,
send_welcome=False,
)
send_email.delay(
'Free Trial Request',
'''
Name: {0.name}
Business name: {0.business}
Phone number: {0.phone}
Email: {0.email}
Referral: {0.question}
Questions: {0.comments}
'''.format(trial_request),
['[email protected]'],
)
result = {
'success': 'We will be in touch shortly',
}
else:
result = {
'errors': form.errors,
}
return Response(result)
| [
"[email protected]"
] | |
e2070525c866d5c13ea17979887ad320706aefe9 | b3e7a3d1e5d50af82b60e6d7b8afa4a077a040ad | /main2.py | 5f83b2d48ceebcd188e66f2ed0f7efb1c605281a | [] | no_license | Namenaro/cheini | d317fb0a6396bf038629490231a175c62e2e6011 | 3b14b58030d1f910265da8c1b859742149df4f6f | refs/heads/master | 2021-05-10T12:30:26.042569 | 2018-02-18T20:03:49 | 2018-02-18T20:03:49 | 118,442,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,941 | py |
# -*- coding: utf-8 -*-
import itertools
import one_experiment_report
import utils
import simple_nets
from math import floor, ceil
import matplotlib.pyplot as plt
import numpy as np
import os
import _pickle as pickle
from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard
from keras import optimizers
import time
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import cm
from keras import losses
# Vary one (or several) hyperparameters, running a series of experiments that way;
# the series results are merged into a single report: a table of two columns (what was varied) and (what was tracked)
#one_experiment_report.main()
class Serial:
def __init__(self, dataset, dataset_name='default'):
self.batch_size = [3]
self.code_len = [2]
self.wb_koef_reg = [0.]
self.num_epochs = [2200]
self.drop_in_decoder = [0.0]
self.drop_in_encoder = [0.0]
self.activation = ['linear']
self.dataset = dataset
self.dataset_name = [dataset_name]
def _get_all_cominations(self):
"""
        :return: a list of dicts - every possible combination of hyperparameter values
"""
def enumdict(listed):
myDict = {}
for i, x in enumerate(listed):
myDict[i] = x
return myDict
hypermapars_arrays = self.__dict__
names = hypermapars_arrays.keys()
        enumerated_names = enumdict(names)  # e.g. {0: 'code_len', 1: 'activation', 2: 'num_epochs'}
n_hyperparams = len(enumerated_names.keys())
a = [None] * n_hyperparams
for k in enumerated_names.keys():
name = enumerated_names[k]
a[k] = hypermapars_arrays[name]
all_combinations = list(itertools.product(*a))
all_dicts = []
for combination in all_combinations:
d = {}
for i in enumerated_names.keys():
name = enumerated_names[i]
d[name] = combination[i]
all_dicts.append(d)
return all_dicts
def make_experiments(self, folder_name=None):
all_dicts = self._get_all_cominations()
print("NUM EXPERIMENTS EXPECTED: " + str(len(all_dicts)))
outer_story = []
summaries = []
experiment_id = 0
if folder_name is None:
            folder_name = utils.ask_user_for_name()  # choose a name for the series
if folder_name is None:
exit()
utils.setup_folder_for_results(folder_name)
folder_full_path = os.getcwd()
for params in all_dicts:
            utils.setup_folder_for_results(str(experiment_id))  # the experiment's name within the series
e = Experiment(params)
summary = e.run_it(outer_story=outer_story, name_of_experiment="experiment_" + str(experiment_id))
summary['experiment_name'] = experiment_id
all_report_line = {**params, **summary}
summaries.append(all_report_line)
experiment_id += 1
        os.chdir(folder_full_path)  # back to the series folder
doc = SimpleDocTemplate("seria_report.pdf", pagesize=letter,
rightMargin=72, leftMargin=72,
topMargin=72, bottomMargin=18)
doc.build(outer_story)
return summaries
from keras.regularizers import Regularizer
from keras import backend as K
class ActivityRegularizer(Regularizer):
def __init__(self, l1=0., l2=0.):
self.l1 = l1
self.l2 = l2
def __call__(self,x):
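        # NOTE: effectively a no-op stub - the L1/L2 penalty terms below are
        # commented out and 0 is always returned, so no activity regularization
        # is actually applied.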
loss = 0
#loss += self.l1 * K.sum(K.mean(K.abs(x), axis=0))
#loss += self.l2 * K.sum(K.mean(K.square(x), axis=0))
p1 = x[0]
p2 = x[1]
p3 = x[2]
loss = 0
return 0
def get_config(self):
return {"name": self.__class__.__name__,
"l1": self.l1,
"l2": self.l2}
class Experiment:
def __init__(self, dictionary):
for k, v in dictionary.items():
setattr(self, k, v)
def run_it(self, outer_story, name_of_experiment):
print("RUN: " + str(self.__dict__))
        # pull the dataset out of its file
foveas01 = utils.get_dataset(self.dataset)
a_regulariser = ActivityRegularizer(l1=0., l2=0.)
        # build and train the model
en, de, ae = simple_nets.create_ae_YANA(encoding_dim=self.code_len,
input_data_shape=foveas01[0].shape,
activity_regulariser=a_regulariser,
koef_reg=self.wb_koef_reg,
activation_on_code=self.activation,
drop_in_decoder=self.drop_in_decoder,
drop_in_encoder=self.drop_in_encoder)
sgd = optimizers.SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
ae.compile(optimizer=sgd, loss=losses.mean_squared_error)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto')
history = ae.fit(foveas01, foveas01,
epochs=self.num_epochs,
#batch_size=ceil(len(foveas01) / 2),
batch_size=self.batch_size,
shuffle=False,
validation_data=(foveas01, foveas01),
callbacks=[early_stopping])
        # generate a report from the training run on this dataset
report = one_experiment_report.ReportOnPath(ae=ae, en=en, de=de,
dataset=foveas01,
history_obj=history,
name_of_experiment=self.dataset + "__" + name_of_experiment
)
report.create_summary()
summary, exp_outer_story = report.end()
outer_story += exp_outer_story
utils.save_all(encoder=en, decoder=de, autoencoder=ae)
return summary
def make_seria_on_dataset(dataset, name_of_seria=None):
old_dir = os.getcwd()
utils.setup_folder_for_results("SERIES")
s = Serial(dataset)
summaries = s.make_experiments(folder_name=name_of_seria)
pickle.dump(summaries, open("summaries_dicts.pkl", "wb"))
print("summaries is saved into: " + os.getcwd())
with open("settings.txt", "w") as text_file:
text_file.write(str(s.__dict__))
os.chdir(old_dir)
def get_dataset(a_dir):
return [os.path.join(a_dir, name, 'foveas.pkl') for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def learn_models_on_dataset(folder_with_dataset, name_for_experiment):
dataset = get_dataset(folder_with_dataset)
make_seria_on_dataset(dataset, name_for_experiment)
if __name__ == "__main__":
directory = 'C:\\Users\\neuro\\PycharmProjects\\cheini\\partial\\7x7'
learn_models_on_dataset(folder_with_dataset=directory,
name_for_experiment='7x7 last ITOG')
#directory1 = 'C:\\Users\\neuro\\PycharmProjects\\cheini\\partial\\7x7'
# dataset1 = get_dataset(directory1)
#make_seria_on_dataset(dataset1, "ITOG 7x7 partial_")
| [
"[email protected]"
] | |
06005fb2c3ba90f593ed444f209cd6a808e3114b | 907cb7612ede31418997ce7b2813c9f2192e6a30 | /phase_cells/focal_evaluate/printout_network.py | a7d12f6f8ac0762947ad20ae88fc7d697979018f | [
"MIT"
] | permissive | shenghh2015/segmentation_models | c3a6f9f0a7fc2ac52d0d1f6b2beef1c69133bae2 | 473c528c724f62ff38ac127747dd8babb7de6b85 | refs/heads/master | 2023-08-14T05:52:36.290536 | 2021-10-19T03:02:46 | 2021-10-19T03:02:46 | 276,793,700 | 0 | 0 | null | 2020-07-03T02:57:39 | 2020-07-03T02:57:38 | null | UTF-8 | Python | false | false | 648 | py |
import os
import sys
sys.path.append('../')
import segmentation_models as sm
from segmentation_models import Unet
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
backbone = 'efficientnetb4'
model = Unet(backbone, input_shape = (736,736,3))
network_layers = model.layers
feature_layers = ['block6a_expand_activation', 'block4a_expand_activation','block3a_expand_activation', 'block2a_expand_activation']
with open('network_{}.txt'.format(backbone), 'w+') as f:
for layer in network_layers:
f.write('{}: {}\n'.format(layer.name, layer.output.get_shape()))
if layer.name in feature_layers:
            f.write('\nFeature extraction ---{}\n'.format(layer.name))
| [
"[email protected]"
] | |
b99727124520efc1555a5d51225f48be9156a9ec | d8e4dece3a4c35c30ec6a90f6dc7bcf4ff43b4b4 | /searcher/server/query.py | 7e74d75d9f400b0199527b5f3e37b231f9a95987 | [] | no_license | wikty/MiniSearchEngine | c17160a9b65f462fa0690723aa860c6092dea97e | 63f7ef576f48b780fb8cf7fd3f6d955bc0037efd | refs/heads/master | 2021-01-13T02:44:39.591042 | 2018-09-13T06:40:47 | 2018-09-13T06:40:47 | 77,355,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py |
from searcher.indexer.pipelines import Pipeline
from .ranker import rank
from .extractor import extract
def process(db, query):
[terms, _] = Pipeline.preprocess(query)
doc_info = db.get_doc_info(terms)
doc_list = rank(db, doc_info, terms)
    return extract(doc_list)
| [
"[email protected]"
] | |
3a40a1e42f60c1c9f14a8869461d90cc62d7f560 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /tests/test_rand_affine.py | 1e1a23bc0915f7025bb7fdc388ed9593b196b866 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 5,638 | py |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import RandAffine
TEST_CASES = [
[
dict(as_tensor_output=False, device=None),
{"img": torch.arange(27).reshape((3, 3, 3))},
np.arange(27).reshape((3, 3, 3)),
],
[
dict(as_tensor_output=False, device=None, spatial_size=-1),
{"img": torch.arange(27).reshape((3, 3, 3))},
np.arange(27).reshape((3, 3, 3)),
],
[
dict(as_tensor_output=False, device=None),
{"img": torch.arange(27).reshape((3, 3, 3)), "spatial_size": (2, 2)},
np.array([[[2.0, 3.0], [5.0, 6.0]], [[11.0, 12.0], [14.0, 15.0]], [[20.0, 21.0], [23.0, 24.0]]]),
],
[
dict(as_tensor_output=True, device=None),
{"img": torch.ones((1, 3, 3, 3)), "spatial_size": (2, 2, 2)},
torch.ones((1, 2, 2, 2)),
],
[
dict(as_tensor_output=True, device=None, spatial_size=(2, 2, 2), cache_grid=True),
{"img": torch.ones((1, 3, 3, 3))},
torch.ones((1, 2, 2, 2)),
],
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
as_tensor_output=True,
padding_mode="zeros",
spatial_size=(2, 2, 2),
device=None,
),
{"img": torch.ones((1, 3, 3, 3)), "mode": "bilinear"},
torch.tensor([[[[0.3658, 1.0000], [1.0000, 1.0000]], [[1.0000, 1.0000], [1.0000, 0.9333]]]]),
],
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
as_tensor_output=True,
padding_mode="zeros",
spatial_size=(2, 2, 2),
cache_grid=True,
device=None,
),
{"img": torch.ones((1, 3, 3, 3)), "mode": "bilinear"},
torch.tensor([[[[0.3658, 1.0000], [1.0000, 1.0000]], [[1.0000, 1.0000], [1.0000, 0.9333]]]]),
],
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
scale_range=[0.1, 0.2],
as_tensor_output=True,
device=None,
),
{"img": torch.arange(64).reshape((1, 8, 8)), "spatial_size": (3, 3)},
torch.tensor([[[18.7362, 15.5820, 12.4278], [27.3988, 24.2446, 21.0904], [36.0614, 32.9072, 29.7530]]]),
],
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
scale_range=[0.1, 0.2],
spatial_size=(3, 3),
cache_grid=True,
as_tensor_output=True,
device=None,
),
{"img": torch.arange(64).reshape((1, 8, 8))},
torch.tensor([[[18.7362, 15.5820, 12.4278], [27.3988, 24.2446, 21.0904], [36.0614, 32.9072, 29.7530]]]),
],
]
ARR_NUMPY = np.arange(9 * 10).reshape(1, 9, 10)
ARR_TORCH = torch.Tensor(ARR_NUMPY)
TEST_CASES_SKIPPED_CONSISTENCY = []
for im in (ARR_NUMPY, ARR_TORCH):
for as_tensor_output in (True, False):
for in_dtype_is_int in (True, False):
TEST_CASES_SKIPPED_CONSISTENCY.append((im, as_tensor_output, in_dtype_is_int))
class TestRandAffine(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test_rand_affine(self, input_param, input_data, expected_val):
g = RandAffine(**input_param)
g.set_random_state(123)
result = g(**input_data)
if input_param.get("cache_grid", False):
self.assertTrue(g._cached_grid is not None)
self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
if isinstance(result, torch.Tensor):
np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
else:
np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
def test_ill_cache(self):
with self.assertWarns(UserWarning):
RandAffine(cache_grid=True)
with self.assertWarns(UserWarning):
RandAffine(cache_grid=True, spatial_size=(1, 1, -1))
@parameterized.expand(TEST_CASES_SKIPPED_CONSISTENCY)
def test_skipped_transform_consistency(self, im, as_tensor_output, in_dtype_is_int):
t1 = RandAffine(prob=0, as_tensor_output=as_tensor_output)
t2 = RandAffine(prob=1, spatial_size=(10, 11), as_tensor_output=as_tensor_output)
# change dtype to int32 or float32
if in_dtype_is_int:
im = im.astype("int32") if isinstance(im, np.ndarray) else im.int()
else:
im = im.astype("float32") if isinstance(im, np.ndarray) else im.float()
out1 = t1(im)
out2 = t2(im)
# check same type
self.assertEqual(type(out1), type(out2))
# check matching dtype
self.assertEqual(out1.dtype, out2.dtype)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
5b3b3aa4586919012b05a07fefa8087dd34de097 | d0d45247209d3eabc1cb6bc0b01a8c23f807820d | /tests/test_utility.py | 8c0fd5031a9c46032e233084a2dbabffcb1e5ae4 | [
"MIT"
] | permissive | yw5aj/trimesh | 2b102c5e265108ebd089023bb1c32b3217c35059 | f7dc490f7431ced7cc121369e96b9b2eeb17490d | refs/heads/master | 2021-01-20T03:25:26.772416 | 2017-04-27T16:15:10 | 2017-04-27T16:15:10 | 89,539,048 | 0 | 0 | null | 2017-04-27T00:37:43 | 2017-04-27T00:37:43 | null | UTF-8 | Python | false | false | 8,140 | py |
import trimesh
import unittest
import logging
import time
import os
import sys
import inspect
import numpy as np
import json
from collections import deque
import generic as g
TEST_DIM = (100, 3)
TOL_ZERO = 1e-9
TOL_CHECK = 1e-2
log = logging.getLogger('trimesh')
log.addHandler(logging.NullHandler())
_QUICK = '-q' in sys.argv
class VectorTests(unittest.TestCase):
def setUp(self):
self.test_dim = TEST_DIM
def test_unitize_multi(self):
vectors = np.ones(self.test_dim)
vectors[0] = [0, 0, 0]
vectors, valid = trimesh.unitize(vectors, check_valid=True)
self.assertFalse(valid[0])
self.assertTrue(np.all(valid[1:]))
length = np.sum(vectors[1:] ** 2, axis=1) ** 2
length_check = np.abs(length - 1.0) < TOL_ZERO
self.assertTrue(np.all(length_check))
def test_align(self):
log.info('Testing vector alignment')
target = np.array([0, 0, 1])
for i in range(100):
vector = trimesh.unitize(np.random.random(3) - .5)
T = trimesh.geometry.align_vectors(vector, target)
result = np.dot(T, np.append(vector, 1))[0:3]
aligned = np.abs(result - target).sum() < TOL_ZERO
self.assertTrue(aligned)
def test_horn(self):
log.info('Testing absolute orientation')
for i in range(10):
points_A = (np.random.random(self.test_dim) - .5) * 100
angle = 4 * np.pi * (np.random.random() - .5)
vector = trimesh.unitize(np.random.random(3) - .5)
offset = 100 * (np.random.random(3) - .5)
T = trimesh.transformations.rotation_matrix(angle, vector)
T[0:3, 3] = offset
points_B = trimesh.transformations.transform_points(points_A, T)
M, error = trimesh.points.absolute_orientation(
points_A, points_B, return_error=True)
self.assertTrue(np.all(error < TOL_ZERO))
class UtilTests(unittest.TestCase):
def test_track(self):
a = trimesh.util.tracked_array(np.random.random(TEST_DIM))
modified = deque()
modified.append(int(a.md5(), 16))
a[0][0] = 10
modified.append(int(a.md5(), 16))
a[1] = 5
modified.append(int(a.md5(), 16))
a[2:] = 2
modified.append(int(a.md5(), 16))
self.assertTrue((np.diff(modified) != 0).all())
modified = deque()
modified.append(int(a.md5(), 16))
b = a[[0, 1, 2]]
modified.append(int(a.md5(), 16))
c = a[1:]
modified.append(int(a.md5(), 16))
self.assertTrue((np.diff(modified) == 0).all())
def test_bounds_tree(self):
for attempt in range(3):
for dimension in [2, 3]:
t = g.np.random.random((1000, 3, dimension))
bounds = g.np.column_stack((t.min(axis=1), t.max(axis=1)))
tree = g.trimesh.util.bounds_tree(bounds)
self.assertTrue(0 in tree.intersection(bounds[0]))
def test_strips(self):
'''
Test our conversion of triangle strips to face indexes.
'''
# test 4- triangle strip
s = [g.np.arange(6)]
f = g.trimesh.util.triangle_strips_to_faces(s)
assert (f == g.np.array([[0, 1, 2],
[3, 2, 1],
[2, 3, 4],
[5, 4, 3]])).all()
assert len(f) + 2 == len(s[0])
# test single triangle
s = [g.np.arange(3)]
f = g.trimesh.util.triangle_strips_to_faces(s)
assert (f == g.np.array([[0, 1, 2]])).all()
assert len(f) + 2 == len(s[0])
s = [g.np.arange(100)]
f = g.trimesh.util.triangle_strips_to_faces(s)
assert len(f) + 2 == len(s[0])
class SceneTests(unittest.TestCase):
def setUp(self):
filename = os.path.join(g.dir_models, 'box.STL')
mesh = trimesh.load(filename)
split = mesh.split()
scene = trimesh.scene.Scene(split)
self.scene = scene
def test_scene(self):
duplicates = self.scene.duplicate_nodes()
class IOTest(unittest.TestCase):
def test_dae(self):
a = g.get_mesh('ballA.off')
r = a.export(file_type='dae')
class ContainsTest(unittest.TestCase):
def test_inside(self):
sphere = g.trimesh.primitives.Sphere(radius=1.0, subdivisions=4)
g.log.info('Testing contains function with sphere')
samples = (np.random.random((1000, 3)) - .5) * 5
radius = np.linalg.norm(samples, axis=1)
margin = .05
truth_in = radius < (1.0 - margin)
truth_out = radius > (1.0 + margin)
contains = sphere.contains(samples)
if not contains[truth_in].all():
raise ValueError('contains test doesnt match truth!')
if contains[truth_out].any():
raise ValueError('contains test doesnt match truth!')
class MassTests(unittest.TestCase):
def setUp(self):
# inertia numbers pulled from solidworks
self.truth = g.data['mass_properties']
self.meshes = dict()
for data in self.truth:
filename = data['filename']
self.meshes[filename] = g.get_mesh(filename)
def test_mass(self):
def check_parameter(a, b):
check = np.all(
np.less(np.abs(np.array(a) - np.array(b)), TOL_CHECK))
return check
for truth in self.truth:
calculated = self.meshes[truth['filename']].mass_properties(density=truth[
'density'])
parameter_count = 0
for parameter in calculated.keys():
if not (parameter in truth):
continue
parameter_ok = check_parameter(
calculated[parameter], truth[parameter])
if not parameter_ok:
log.error('Parameter %s failed on file %s!',
parameter, truth['filename'])
self.assertTrue(parameter_ok)
parameter_count += 1
log.info('%i mass parameters confirmed for %s',
parameter_count, truth['filename'])
class SphericalTests(unittest.TestCase):
def test_spherical(self):
v = g.trimesh.unitize(g.np.random.random((1000, 3)) - .5)
spherical = g.trimesh.util.vector_to_spherical(v)
v2 = g.trimesh.util.spherical_to_vector(spherical)
self.assertTrue((np.abs(v - v2) < g.trimesh.constants.tol.merge).all())
class HemisphereTests(unittest.TestCase):
def test_hemisphere(self):
v = trimesh.unitize(np.random.random((10000, 3)) - .5)
v[0] = [0, 1, 0]
v[1] = [1, 0, 0]
v[2] = [0, 0, 1]
v = np.column_stack((v, -v)).reshape((-1, 3))
resigned = trimesh.util.vector_hemisphere(v)
check = (abs(np.diff(resigned.reshape((-1, 2, 3)),
axis=1).sum(axis=2)) < trimesh.constants.tol.zero).all()
self.assertTrue(check)
class FileTests(unittest.TestCase):
def test_io_wrap(self):
test_b = g.np.random.random(1).tostring()
test_s = 'this is a test yo'
res_b = g.trimesh.util.wrap_as_stream(test_b).read()
res_s = g.trimesh.util.wrap_as_stream(test_s).read()
self.assertTrue(res_b == test_b)
self.assertTrue(res_s == test_s)
def test_file_hash(self):
data = g.np.random.random(10).tostring()
path = g.os.path.join(g.dir_data, 'nestable.json')
for file_obj in [g.trimesh.util.wrap_as_stream(data),
open(path, 'rb')]:
start = file_obj.tell()
hashed = g.trimesh.util.hash_file(file_obj)
self.assertTrue(file_obj.tell() == start)
self.assertTrue(hashed is not None)
self.assertTrue(len(hashed) > 5)
file_obj.close()
if __name__ == '__main__':
trimesh.util.attach_to_log()
unittest.main()
| [
"[email protected]"
] | |
919ad93f8397a45a32157a3d9ce108dcda051ccb | 7769cb512623c8d3ba96c68556b2cea5547df5fd | /configs/retinanet_x101_64x4d_fpn_1x.py | c8be724f92d2a09198980ad017f4851b0be09359 | [
"MIT"
] | permissive | JialeCao001/D2Det | 0e49f4c76e539d574e46b02f278242ca912c31ea | a76781ab624a1304f9c15679852a73b4b6770950 | refs/heads/master | 2022-12-05T01:00:08.498629 | 2020-09-04T11:33:26 | 2020-09-04T11:33:26 | 270,723,372 | 312 | 88 | MIT | 2020-07-08T23:53:23 | 2020-06-08T15:37:35 | Python | UTF-8 | Python | false | false | 3,901 | py |
# model settings
model = dict(
type='RetinaNet',
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5),
bbox_head=dict(
type='RetinaHead',
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
octave_base_scale=4,
scales_per_octave=3,
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[8, 16, 32, 64, 128],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/retinanet_x101_64x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"[email protected]"
] | |
abb7a5ff2e147e6f3a2c6e5b5b06e12ddf6207c3 | 4a1273f72e7d8a07a3fa67ac9f2709b64ec6bc18 | /retiresmartz/tests/test_social_security.py | 79ee980a0f79030246707e0e54844ec9226eb916 | [] | no_license | WealthCity/django-project | 6668b92806d8c61ef9e20bd42daec99993cd25b2 | fa31fa82505c3d0fbc54bd8436cfc0e49c896f3e | refs/heads/dev | 2021-01-19T14:10:52.115301 | 2017-04-12T11:23:32 | 2017-04-12T11:23:32 | 88,132,284 | 0 | 1 | null | 2017-04-13T06:26:30 | 2017-04-13T06:26:29 | null | UTF-8 | Python | false | false | 370 | py |
from datetime import date
from django.test import TestCase
from retiresmartz.calculator.social_security import calculate_payments
class SocialSecurityTests(TestCase):
def test_calculate_payments(self):
amounts = calculate_payments(dob=date(1975, 1, 1), income=60000)
self.assertEqual(amounts[67], 2055)
self.assertEqual(amounts[68], 2219)
| [
"[email protected]"
] | |
4290f33117641c516843aeaf64025823ad951026 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/f8052e4261238ff6c93465b3f0d0f22457f127ce-<container_run>-fix.py | d32a173f5a709bd873f8aaaa81b4fc29a4a7aeb0 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py |
def container_run(platform: str, nvidia_runtime: bool, docker_registry: str, shared_memory_size: str, local_ccache_dir: str, command: List[str], cleanup: Cleanup, dry_run: bool=False) -> int:
'Run command in a container'
container_wait_s = 600
environment = {
'CCACHE_MAXSIZE': '500G',
'CCACHE_TEMPDIR': '/tmp/ccache',
'CCACHE_DIR': '/work/ccache',
'CCACHE_LOGFILE': '/tmp/ccache.log',
}
jenkins_env_vars = ['BUILD_NUMBER', 'BUILD_ID', 'BUILD_TAG']
environment.update({k: os.environ[k] for k in jenkins_env_vars if (k in os.environ)})
environment.update({k: os.environ[k] for k in ['CCACHE_MAXSIZE'] if (k in os.environ)})
tag = get_docker_tag(platform=platform, registry=docker_registry)
mx_root = get_mxnet_root()
local_build_folder = buildir()
os.makedirs(local_build_folder, exist_ok=True)
os.makedirs(local_ccache_dir, exist_ok=True)
logging.info('Using ccache directory: %s', local_ccache_dir)
docker_client = docker.from_env()
docker_cmd_list = [get_docker_binary(nvidia_runtime), 'run', '--cap-add', 'SYS_PTRACE', '--rm', '--shm-size={}'.format(shared_memory_size), '-v', '{}:/work/mxnet'.format(mx_root), '-v', '{}:/work/build'.format(local_build_folder), '-v', '{}:/work/ccache'.format(local_ccache_dir), '-u', '{}:{}'.format(os.getuid(), os.getgid()), '-e', 'CCACHE_MAXSIZE={}'.format(environment['CCACHE_MAXSIZE']), '-e', 'CCACHE_TEMPDIR={}'.format(environment['CCACHE_TEMPDIR']), '-e', 'CCACHE_DIR={}'.format(environment['CCACHE_DIR']), '-e', 'CCACHE_LOGFILE={}'.format(environment['CCACHE_LOGFILE']), '-ti', tag]
docker_cmd_list.extend(command)
docker_cmd = ' \\\n\t'.join(docker_cmd_list)
logging.info('Running %s in container %s', command, tag)
logging.info('Executing the equivalent of:\n%s\n', docker_cmd)
ret = 0
if (not dry_run):
signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM})
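        # SIGINT/SIGTERM stay blocked while the container is started and handed
        # to `cleanup`, so an interrupt cannot arrive before the container is
        # registered for teardown; the mask is lifted right after add_container().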
runtime = None
if nvidia_runtime:
runtime = 'nvidia'
container = docker_client.containers.run(tag, runtime=runtime, detach=True, command=command, shm_size=shared_memory_size, user='{}:{}'.format(os.getuid(), os.getgid()), cap_add='SYS_PTRACE', volumes={
mx_root: {
'bind': '/work/mxnet',
'mode': 'rw',
},
local_build_folder: {
'bind': '/work/build',
'mode': 'rw',
},
local_ccache_dir: {
'bind': '/work/ccache',
'mode': 'rw',
},
}, environment=environment)
try:
logging.info('Started container: %s', trim_container_id(container.id))
cleanup.add_container(container)
signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM})
stream = container.logs(stream=True, stdout=True, stderr=True)
sys.stdout.flush()
for chunk in stream:
sys.stdout.buffer.write(chunk)
sys.stdout.buffer.flush()
sys.stdout.flush()
stream.close()
try:
logging.info('Waiting for status of container %s for %d s.', trim_container_id(container.id), container_wait_s)
wait_result = container.wait(timeout=container_wait_s)
logging.info('Container exit status: %s', wait_result)
ret = wait_result.get('StatusCode', 200)
except Exception as e:
logging.exception(e)
ret = 150
try:
logging.info('Stopping container: %s', trim_container_id(container.id))
container.stop()
except Exception as e:
logging.exception(e)
ret = 151
try:
logging.info('Removing container: %s', trim_container_id(container.id))
container.remove()
except Exception as e:
logging.exception(e)
ret = 152
cleanup.remove_container(container)
containers = docker_client.containers.list()
if containers:
logging.info('Other running containers: %s', [trim_container_id(x.id) for x in containers])
except docker.errors.NotFound as e:
logging.info('Container was stopped before cleanup started: %s', e)
    return ret
| [
"[email protected]"
] | |
60ba9feb268c4d6bdb08de9c05f99d96d934f28e | 6b95f96e00e77f78f0919c10b2c90f116c0b295d | /TelstraTPN/models/body.py | 6402d1a63505555d93481a28e94f4ec6e6af57af | [] | no_license | telstra/Programmable-Network-SDK-python | 0522b54dcba48e16837c6c58b16dabde83b477d5 | d1c19c0383af53a5f09a6f5046da466ae6e1d97a | refs/heads/master | 2021-09-19T17:09:06.831233 | 2018-07-30T03:22:26 | 2018-07-30T03:22:26 | 113,531,312 | 3 | 1 | null | 2018-07-30T03:22:27 | 2017-12-08T04:23:15 | Python | UTF-8 | Python | false | false | 3,864 | py | # coding: utf-8
"""
Telstra Programmable Network API
Telstra Programmable Network is a self-provisioning platform that allows its users to create on-demand connectivity services between multiple end-points and add various network functions to those services. Programmable Network enables to connectivity to a global ecosystem of networking services as well as public and private cloud services. Once you are connected to the platform on one or more POPs (points of presence), you can start creating those services based on the use case that you want to accomplish. The Programmable Network API is available to all customers who have registered to use the Programmable Network. To register, please contact your account representative. # noqa: E501
OpenAPI spec version: 2.4.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Body(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'renewal_option': 'int'
}
attribute_map = {
'renewal_option': 'renewal-option'
}
def __init__(self, renewal_option=None): # noqa: E501
"""Body - a model defined in OpenAPI""" # noqa: E501
self._renewal_option = None
self.discriminator = None
if renewal_option is not None:
self.renewal_option = renewal_option
@property
def renewal_option(self):
"""Gets the renewal_option of this Body. # noqa: E501
\"Renewal Option: 0=Auto Disconnect, 1=Auto Renew, 2=Pay per hour\" # noqa: E501
:return: The renewal_option of this Body. # noqa: E501
:rtype: int
"""
return self._renewal_option
@renewal_option.setter
def renewal_option(self, renewal_option):
"""Sets the renewal_option of this Body.
\"Renewal Option: 0=Auto Disconnect, 1=Auto Renew, 2=Pay per hour\" # noqa: E501
:param renewal_option: The renewal_option of this Body. # noqa: E501
:type: int
"""
self._renewal_option = renewal_option
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Body):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
62363c4bcc24d91b45188cf7f657fda66070fe0d | 37146b1529bfb8094d5ef17734498aba1e701b33 | /python/python_course/pythonStudy4/FangWenKongZhiExample.py | 82883cdaa0bcd2c25e723093041cf2edf8fa576c | [] | no_license | nanfeng729/code-for-test | 9c8e3736ac4c86a43002a658faf37349817de130 | 28071453c38742bffd5b5bdf7461bffdaa6c96be | refs/heads/master | 2022-10-07T17:59:44.558278 | 2020-06-06T10:19:19 | 2020-06-06T10:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | class fwkz:
a = 0
b = 0
_c = 0 # 定义受保护的属性
__d = 0 # 定义私有的属性
def jisuan(self):
return self.a + self.b
def jisuan2(self):
return self.a + self._c
def jisuan3(self):
return self.b + self.__d | [
"[email protected]"
] | |
c1bc4002b45701e9ddcbdae5fbd9f337effbe930 | 587dbdf730b6cc3e693efc5dca5d83d1dd35ee1a | /leetcode/1501-1800/1785.py | d13fff05ea446efdd48f2dcbc0f23ee12d81b53b | [] | no_license | Rivarrl/leetcode_python | 8db2a15646d68e4d84ab263d8c3b6e38d8e3ea99 | dbe8eb449e5b112a71bc1cd4eabfd138304de4a3 | refs/heads/master | 2021-06-17T15:21:28.321280 | 2021-03-11T07:28:19 | 2021-03-11T07:28:19 | 179,452,345 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # -*- coding: utf-8 -*-
# ======================================
# @File : 1785
# @Time : 2021/3/8 12:20
# @Author : Rivarrl
# ======================================
from algorithm_utils import *
class Solution:
"""
[1785. 构成特定和需要添加的最少元素](https://leetcode-cn.com/problems/minimum-elements-to-add-to-form-a-given-sum/)
"""
@timeit
def minElements(self, nums: List[int], limit: int, goal: int) -> int:
return (abs(sum(nums) - goal) + limit - 1) // limit
if __name__ == '__main__':
a = Solution()
a.minElements(nums = [1,-1,1], limit = 3, goal = -4)
a.minElements(nums = [1,-10,9,1], limit = 100, goal = 0) | [
"[email protected]"
] | |
6f4f236a04b08ff986588d8d74bf27e19b3776ce | a9958f7c7887a92ec9fc48b02ed8a5cb75a03311 | /db.py | 1b274e58f4478c7f209d2e9b19cf25ce7d613166 | [] | no_license | ahmedfadhil/Dynamically-Weighted-Bandwidth- | 816c18777b49f3520433e65accf9e179f64e0836 | 1c8821aec73f32e704d12cebffcda01d1319bc80 | refs/heads/master | 2021-01-02T09:39:58.814463 | 2017-08-03T21:55:26 | 2017-08-03T21:55:26 | 99,271,051 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,374 | py | import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
style.use('ggplot')
X, y = make_blobs(n_samples=15, centers=3, n_features=2)
##X = np.array([[1, 2],
## [1.5, 1.8],
## [5, 8],
## [8, 8],
## [1, 0.6],
## [9, 11],
## [8, 2],
## [10, 2],
## [9, 3]])
##plt.scatter(X[:, 0],X[:, 1], marker = "x", s=150, linewidths = 5, zorder = 10)
##plt.show()
'''
1. Start at every datapoint as a cluster center
2. take mean of radius around cluster, setting that as new cluster center
3. Repeat #2 until convergence.
'''
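# A toy pass of step 2, with made-up numbers: given radius 2 and the 1-D points
# [1.0, 1.5, 5.0], the window around 1.0 captures {1.0, 1.5}, so its center
# moves to their mean, 1.25; the window around 5.0 captures only itself and
# stays put. Iterating until no center moves yields the final cluster centers.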
class Mean_Shift:
def __init__(self, radius=None, radius_norm_step=100):
self.radius = radius
self.radius_norm_step = radius_norm_step
def fit(self, data):
if self.radius == None:
all_data_centroid = np.average(data, axis=0)
all_data_norm = np.linalg.norm(all_data_centroid)
self.radius = all_data_norm / self.radius_norm_step
print(self.radius)
centroids = {}
for i in range(len(data)):
centroids[i] = data[i]
weights = [i for i in range(self.radius_norm_step)][::-1]
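        # weights runs from radius_norm_step-1 down to 0, so the distance band
        # nearest a centroid (weight_index 0) gets the largest weight; below,
        # each point is replicated weight**2 times ('to_add'), letting nearby
        # points dominate the averaged centroid.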
while True:
new_centroids = []
for i in centroids:
in_bandwidth = []
centroid = centroids[i]
for featureset in data:
distance = np.linalg.norm(featureset - centroid)
if distance == 0:
distance = 0.00000000001
weight_index = int(distance / self.radius)
if weight_index > self.radius_norm_step - 1:
weight_index = self.radius_norm_step - 1
to_add = (weights[weight_index] ** 2) * [featureset]
in_bandwidth += to_add
new_centroid = np.average(in_bandwidth, axis=0)
new_centroids.append(tuple(new_centroid))
uniques = sorted(list(set(new_centroids)))
to_pop = []
for i in uniques:
for ii in [i for i in uniques]:
if i == ii:
pass
elif np.linalg.norm(np.array(i) - np.array(ii)) <= self.radius:
# print(np.array(i), np.array(ii))
to_pop.append(ii)
break
for i in to_pop:
try:
uniques.remove(i)
except:
pass
prev_centroids = dict(centroids)
centroids = {}
for i in range(len(uniques)):
centroids[i] = np.array(uniques[i])
optimized = True
for i in centroids:
if not np.array_equal(centroids[i], prev_centroids[i]):
optimized = False
if optimized:
break
self.centroids = centroids
self.classifications = {}
for i in range(len(self.centroids)):
self.classifications[i] = []
for featureset in data:
# compare distance to either centroid
distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
# print(distances)
classification = (distances.index(min(distances)))
# featureset that belongs to that cluster
self.classifications[classification].append(featureset)
def predict(self, data):
# compare distance to either centroid
distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]
classification = (distances.index(min(distances)))
return classification
clf = Mean_Shift()
clf.fit(X)
centroids = clf.centroids
print(centroids)
colors = 10 * ['r', 'g', 'b', 'c', 'k', 'y']
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker="x", color=color, s=150, linewidths=5, zorder=10)
for c in centroids:
plt.scatter(centroids[c][0], centroids[c][1], color='k', marker="*", s=150, linewidths=5)
plt.show() | [
"[email protected]"
] | |
bca17f6f16c5c7b53f36b1772c1609844002a2d0 | 45a61af9028a1805c08b6f7638c7aebe8140bd2d | /Groundwater/mf6/autotest/test_z01_nightly_build_examples.py | 271101a6d317d1429ed62e0954a5f125c5a0fd18 | [] | no_license | gumilar19/Personal | 1c1fac036af3a4b9d4d425b7c8cb604271b94fd2 | c666b07c5184006aca8e6ad946cc98ef72dfe9fe | refs/heads/master | 2023-08-14T20:39:07.164849 | 2021-09-29T11:19:10 | 2021-09-29T11:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,397 | py | import os
import sys
import subprocess
import pathlib
try:
import pymake
except:
msg = 'Error. Pymake package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install https://github.com/modflowpy/pymake/zipball/master'
raise Exception(msg)
try:
import flopy
except:
msg = 'Error. FloPy package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install flopy'
raise Exception(msg)
from simulation import Simulation
def get_example_directory(base, fdir, subdir='mf6'):
exdir = None
for root, dirs, files in os.walk(base):
for d in dirs:
if d.startswith(fdir):
exdir = os.path.abspath(os.path.join(root, d, subdir))
break
if exdir is not None:
break
return exdir
# find path to modflow6-testmodels or modflow6-testmodels.git directory
home = os.path.expanduser('~')
print('$HOME={}'.format(home))
fdir = 'modflow6-testmodels'
exdir = get_example_directory(home, fdir, subdir='mf6')
if exdir is None:
p = pathlib.Path(os.getcwd())
home = os.path.abspath(pathlib.Path(*p.parts[:2]))
print('$HOME={}'.format(home))
exdir = get_example_directory(home, fdir, subdir='mf6')
if exdir is not None:
assert os.path.isdir(exdir)
def get_branch():
try:
# determine current buildstat branch
b = subprocess.Popen(("git", "status"),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
if isinstance(b, bytes):
b = b.decode('utf-8')
# determine current buildstat branch
for line in b.splitlines():
if 'On branch' in line:
branch = line.replace('On branch ', '').rstrip()
except:
branch = None
return branch
def get_mf6_models():
"""
Get a list of test models
"""
# determine if running on travis
is_travis = 'TRAVIS' in os.environ
is_github_action = 'CI' in os.environ
# get current branch
is_CI = False
if is_travis:
is_CI = True
branch = os.environ['BRANCH']
elif is_github_action:
is_CI = True
branch = os.path.basename(os.environ['GITHUB_REF'])
else:
branch = get_branch()
print('On branch {}'.format(branch))
# tuple of example files to exclude
exclude = (None,)
# update exclude
if is_CI:
exclude_CI = ('test022_MNW2_Fig28',
'test007_751x751_confined')
exclude = exclude + exclude_CI
exclude = list(exclude)
# write a summary of the files to exclude
print('list of tests to exclude:')
for idx, ex in enumerate(exclude):
print(' {}: {}'.format(idx + 1, ex))
# build list of directories with valid example files
if exdir is not None:
dirs = [d for d in os.listdir(exdir)
if 'test' in d and d not in exclude]
else:
dirs = []
# exclude dev examples on master or release branches
if 'master' in branch.lower() or 'release' in branch.lower():
drmv = []
for d in dirs:
if '_dev' in d.lower():
drmv.append(d)
for d in drmv:
dirs.remove(d)
# sort in numerical order for case sensitive os
if len(dirs) > 0:
dirs = sorted(dirs, key=lambda v: (v.upper(), v[0].islower()))
# determine if only a selection of models should be run
select_dirs = None
select_packages = None
for idx, arg in enumerate(sys.argv):
if arg.lower() == '--sim':
if len(sys.argv) > idx + 1:
select_dirs = sys.argv[idx + 1:]
break
elif arg.lower() == '--pak':
if len(sys.argv) > idx + 1:
select_packages = sys.argv[idx + 1:]
select_packages = [item.upper() for item in select_packages]
break
elif arg.lower() == '--match':
if len(sys.argv) > idx + 1:
like = sys.argv[idx + 1]
dirs = [item for item in dirs if like in item]
break
# determine if the selection of model is in the test models to evaluate
if select_dirs is not None:
found_dirs = []
for d in select_dirs:
if d in dirs:
found_dirs.append(d)
dirs = found_dirs
if len(dirs) < 1:
msg = 'Selected models not available in test'
print(msg)
# determine if the specified package(s) is in the test models to evaluate
if select_packages is not None:
found_dirs = []
for d in dirs:
pth = os.path.join(exdir, d)
namefiles = pymake.get_namefiles(pth)
ftypes = []
for namefile in namefiles:
ftype = pymake.get_mf6_ftypes(namefile, select_packages)
if ftype not in ftypes:
ftypes += ftype
if len(ftypes) > 0:
ftypes = [item.upper() for item in ftypes]
for pak in select_packages:
if pak in ftypes:
found_dirs.append(d)
break
dirs = found_dirs
if len(dirs) < 1:
msg = 'Selected packages not available ['
for pak in select_packages:
msg += ' {}'.format(pak)
msg += ']'
print(msg)
return dirs
def get_htol(dir):
htol = None
if dir == 'test059_mvlake_laksfr_tr':
if sys.platform.lower() == 'darwin':
htol = 0.002
return htol
def run_mf6(sim):
"""
Run the MODFLOW 6 simulation and compare to existing head file or
appropriate MODFLOW-2005, MODFLOW-NWT, MODFLOW-USG, or MODFLOW-LGR run.
"""
print(os.getcwd())
src = os.path.join(exdir, sim.name)
dst = os.path.join('temp', sim.name)
sim.setup(src, dst)
sim.run()
sim.compare()
sim.teardown()
def test_mf6model():
# determine if test directory exists
dirtest = dir_avail()
if not dirtest:
return
# get a list of test models to run
dirs = get_mf6_models()
# run the test models
for dir in dirs:
yield run_mf6, Simulation(dir, htol=get_htol(dir))
return
def dir_avail():
avail = False
if exdir is not None:
avail = os.path.isdir(exdir)
if not avail:
print('"{}" does not exist'.format(exdir))
print('no need to run {}'.format(os.path.basename(__file__)))
return avail
def main():
# write message
tnam = os.path.splitext(os.path.basename(__file__))[0]
msg = 'Running {} test'.format(tnam)
print(msg)
# determine if test directory exists
dirtest = dir_avail()
if not dirtest:
return
# get a list of test models to run
dirs = get_mf6_models()
# run the test models
for dir in dirs:
sim = Simulation(dir, htol=get_htol(dir))
run_mf6(sim)
return
if __name__ == "__main__":
print('standalone run of {}'.format(os.path.basename(__file__)))
delFiles = True
for idx, arg in enumerate(sys.argv):
if arg.lower() == '--keep':
if len(sys.argv) > idx + 1:
delFiles = False
break
# run main routine
main()
| [
"[email protected]"
] | |
2c84b88b2248d07bc3fdaaa1c84bb232af9210d9 | 6558766df338730772d02a318e65bfa46cff40b6 | /apps/openprofession/migrations/0037_simulizatordata.py | 856c0742ca3f953cf907bf71e7a9709af76ae251 | [] | no_license | ITOO-UrFU/openedu | 02dc265872e2de1d74b1e8eca0c6596c7860841a | 7c6507d671653fc0ccf35b5305f960eb32e7159f | refs/heads/master | 2021-01-20T21:16:39.987744 | 2019-08-07T10:02:12 | 2019-08-07T10:02:12 | 101,761,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-13 14:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openprofession', '0036_pdavailable'),
]
operations = [
migrations.CreateModel(
name='SimulizatorData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fio', models.CharField(max_length=2048, verbose_name='ФИО')),
('email', models.EmailField(max_length=254, verbose_name='Email')),
('phone', models.CharField(max_length=255, verbose_name='Телефон')),
('username', models.CharField(blank=True, max_length=255, null=True, verbose_name='username')),
('password', models.CharField(blank=True, max_length=255, null=True, verbose_name='password')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
],
options={
'verbose_name': 'заявка на участие в симуляторе',
'verbose_name_plural': 'заявки на участие в симуляторе',
},
),
]
| [
"[email protected]"
] | |
6230c89fbf90c5fe08760c737ce41aeee110b049 | fde10302616f4bbba5a67a33decb65e47765e268 | /misc/v1/reconstruction/meshroom_to_log.py | 798f63beb6c87bdb1d5544ec8dea13d120d761ec | [] | no_license | laurelkeys/ff | b1f562f2e3caf2cd0616ca93fff4fb3872e55cdc | bac774e1f7b3131f559ee3ff1662836c424ebaa5 | refs/heads/master | 2023-02-23T17:46:49.011034 | 2022-01-21T20:31:59 | 2022-01-21T20:31:59 | 214,757,656 | 1 | 1 | null | 2023-02-11T00:30:56 | 2019-10-13T03:58:59 | Python | UTF-8 | Python | false | false | 4,269 | py | import os
import glob
import json
import argparse
import collections
import numpy as np
# ref.:
# [1] https://www.tanksandtemples.org/tutorial/
# [2] https://colmap.github.io/format.html#images-txt
# [3] https://github.com/colmap/colmap/blob/dev/src/estimators/pose.h#L125
# [4] https://github.com/alicevision/meshroom/wiki/Using-known-camera-positions
# [5] https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
# [6] https://github.com/alicevision/meshroom/issues/787
# FIXME rename, so it's not confused with trajectory_io
class CameraPose:
def __init__(self, pose_id, image_path, log_matrix):
self.id = pose_id
self.image_path = image_path
self.log_matrix = log_matrix
def write_SfM_log(T, i_map, filename):
with open(filename, 'w') as f:
for i, traj in enumerate(T):
metadata = i_map[i]
pose = traj.tolist()
f.write(' '.join(map(str, metadata)) + '\n')
f.write('\n'.join(' '.join(
map('{0:.12f}'.format, pose[i])
) for i in range(4)))
f.write('\n')
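# Each camera is thus one metadata line ("<pose_id> <image_index> 0") followed
# by the four rows of its 4x4 matrix, matching the Tanks and Temples
# trajectory .log layout (ref. [1]).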
def convert_Meshroom_to_log(filename, logfile_out, input_images, formatp):
input_images_list = glob.glob(f"{input_images}/*.{formatp}")
if len(input_images_list) == 0:
print("Warning: no images were found (try setting --formatp)")
input_images_list.sort()
n_of_images = len(input_images_list)
T, i_map, TF, i_mapF = [], [], [], []
views = {}
camera_poses = []
with open(filename, 'r') as sfm_file:
sfm_data = json.load(sfm_file)
for view in sfm_data['views']:
views[view['poseId']] = view['path'] # NOTE equal to the 'viewId'
for camera_pose in sfm_data['poses']:
pose_id = camera_pose['poseId']
pose_transform = camera_pose['pose']['transform']
# 3x3 (column-major) rotation matrix
rotation = np.array(
[float(_) for _ in pose_transform['rotation']]
).reshape((3, 3))
rotation[:, 1:] *= -1 # ref.: [2]
# camera center in world coordinates
center = np.array([float(_) for _ in pose_transform['center']])
# homogeneous transformation matrix
mat = np.identity(4)
mat[:3, :3] = rotation
mat[:3, 3] = center
camera_poses.append(CameraPose(pose_id, views[pose_id], mat))
for pose in camera_poses:
A = np.matrix(pose.log_matrix)
T.append(A.I)
image_name = os.path.basename(pose.image_path)
matching = [i for i, s in enumerate(input_images_list) if image_name in s]
i_map.append([pose.id, matching[0], 0])
for k in range(n_of_images):
try:
# find the k-th view id
view_id = [i for i, item in enumerate(i_map) if k == item[1]][0]
i_mapF.append(np.array([k, k, 0], dtype='int'))
TF.append(T[view_id])
except IndexError:
# assign the identity matrix to the k-th view id
# as the log file needs an entry for every image
i_mapF.append(np.array([k, -1, 0], dtype='int'))
TF.append(np.identity(4))
write_SfM_log(TF, i_mapF, logfile_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert Meshroom .sfm data into the Tanks and Temples .log file format"
)
parser.add_argument("in_sfm_fname", help="Input .sfm filename")
parser.add_argument("out_log_fname", help="Output .log filename")
parser.add_argument("images_folder", help="Input images folder path")
parser.add_argument("--formatp", default="jpg", help="Images format")
args = parser.parse_args()
# NOTE .sfm is actually a JSON
_, ext = os.path.splitext(args.in_sfm_fname)
assert ext.lower() in [".sfm", ".json"]
assert os.path.isfile(args.in_sfm_fname)
assert os.path.isdir(args.images_folder)
convert_Meshroom_to_log(
args.in_sfm_fname,
args.out_log_fname,
args.images_folder, args.formatp
)
# e.g.: python meshroom_to_log.py models\Monstree6\Meshroom\publish\cameras.json models\Monstree6\pointcloud\Monstree6_Meshroom_SfM.log models\Monstree6\images\
| [
"[email protected]"
] | |
4a93f895c4f634e938a00892439e5aa761ecf1b5 | 3d61fe0f49f5d344fc32a6faa799f0a46deec9a5 | /2017/AoC-2017-13v2.py | 290341cd10f0980f65f036c7d6c15a02ddab3382 | [] | no_license | sbeaumont/AoC | 558296fd26cd5272e33d3cb9113c09e4945c98ac | 406eda614d8434d8feb71fe1262f1fda54972a12 | refs/heads/master | 2022-12-13T07:38:36.089775 | 2022-12-04T21:11:49 | 2022-12-04T21:11:49 | 75,467,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | PUZZLE_INPUT_FILE_NAME = "AoC-2017-13-input.txt"
# Parse firewall layers into {depth: range}
with open(PUZZLE_INPUT_FILE_NAME) as puzzle_input_file:
firewall = {int(line.split(":")[0]): int(line.split(":")[1]) for line in puzzle_input_file.readlines()}
max_depth = max(firewall, key=firewall.get)  # depth of the layer whose range is largest (unused below)
def check_layers(wait_time):
severity = 0
for d, r in firewall.iteritems():
at_layer_time = wait_time + d
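        # a scanner with range r returns to the top every 2*(r-1) picoseconds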
if at_layer_time % (2*r-2) == 0:
severity += d * r
return severity
print(check_layers(0))
# Part 2: find the smallest delay for which the packet is never caught.
# Severity alone can't detect a catch at depth 0 (it contributes 0), so
# test for catches directly.
def caught(delay):
    return any((delay + d) % (2 * r - 2) == 0 for d, r in firewall.iteritems())

delay = 0
while caught(delay):
    delay += 1
print(delay)
 | [
"[email protected]"
] | |
2670d4a865a34c6b12557710f3b157b604b6bf68 | 148cb99e0f23679c20243470ad62dc4155aa5252 | /baseinfo/migrations/0016_auto_20191206_0806.py | 8d98453f8d32b5509559f2cb37242495b58c3609 | [] | no_license | Hamidnet220/tax | 46060f24b55a4f348194599d59247ff9435f4379 | 000051be5df6a98f679d13a94e37b9ee30efd5a9 | refs/heads/master | 2020-06-19T09:41:20.998214 | 2019-12-10T01:01:17 | 2019-12-10T01:01:17 | 196,666,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Generated by Django 2.1.7 on 2019-12-06 08:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('baseinfo', '0015_guarantee_guarantee_file'),
]
operations = [
migrations.AlterField(
model_name='guarantee',
name='guarantee_type',
field=models.IntegerField(choices=[(1, ' ضمانت نامه شرکت در مناقصه'), (2, ' ضمانت نامه پیش پرداخت'), (3, ' ضمانت نامه حسن انجام کار')], verbose_name='عنوان ضمانت نامه'),
),
]
| [
"[email protected]"
] | |
b90a0305484644a6728e50d68732ee9e6989bb14 | 478fad340a97fc14d365b95bbd6f8ac1dcc71953 | /121/Solution.py | d76a39e78ef9cadd8e4004cc32002f4a3d0d5986 | [] | no_license | sandyg05/leetcode | 93cca3b3ce4f38cf1ea1c6d3e8400d7b6b776c37 | e9d8036e2be6dbd1b8c958431e07dc35b88ebfa8 | refs/heads/master | 2022-07-16T10:03:59.529470 | 2020-05-13T05:35:49 | 2020-05-13T05:35:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | """
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the stock), design an algorithm to find the maximum profit.
Note that you cannot sell a stock before you buy one.
Example 1:
Input: [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.
Not 7-1 = 6, as selling price needs to be larger than buying price.
Example 2:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
"""
class Solution:
def maxProfit(self, prices):
if not prices:
return 0
min_price = prices[0]
max_profit = 0
for num in prices:
if num < min_price:
min_price = num
if num - min_price > max_profit:
max_profit = num - min_price
return max_profit | [
"[email protected]"
] | |
2bc1fcc7b2f69fdf2a3224d4812bd611106212fd | ca3a49676cdf1016b2d729f0432b451d35b7a281 | /bad-solutions/add.py | 698f358d6ce95ac0d0d3832d8c44a19f39928fd9 | [
"MIT"
] | permissive | SquareandCompass/code-align-evals-data | 3bb71b605316f56bb27466f23706a329f3fb4938 | 97446d992c3785d6605f1500b2c9b95d042e7b9c | refs/heads/main | 2023-06-19T12:47:56.277363 | 2021-07-21T00:22:56 | 2021-07-21T00:22:56 | 640,147,842 | 0 | 1 | null | 2023-05-13T06:22:30 | 2023-05-13T06:22:29 | null | UTF-8 | Python | false | false | 575 | py | def add(lst):
"""Given a non-empty list of integers lst. add the even elements that are at odd indices..
Examples:
add([4, 2, 6, 7]) ==> 2
"""
    return sum([lst[i] for i in range(1, len(lst), 2) if lst[i] % 2 == 0])
def check(candidate):
# Check some simple cases
assert candidate([4, 88]) == 88
assert candidate([4, 5, 6, 7, 2, 122]) == 122
assert candidate([4, 0, 6, 7]) == 0
assert candidate([4, 4, 6, 8]) == 12
# Check some edge cases that are easy to work out by hand.
if __name__ == "__main__":
check(add)
| [
"[email protected]"
] | |
0e9a38665795bd642e825d58f2ad24a34ebb9439 | f8c3c677ba536fbf5a37ac4343c1f3f3acd4d9b6 | /ICA_SDK/test/test_instrument.py | aeb50805e0f9f55fb14b9f8cfa35dbca74de8c92 | [] | no_license | jsialar/integrated_IAP_SDK | 5e6999b0a9beabe4dfc4f2b6c8b0f45b1b2f33eb | c9ff7685ef0a27dc4af512adcff914f55ead0edd | refs/heads/main | 2023-08-25T04:16:27.219027 | 2021-10-26T16:06:09 | 2021-10-26T16:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | # coding: utf-8
"""
IAP Services
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ICA_SDK
from ICA_SDK.models.instrument import Instrument # noqa: E501
from ICA_SDK.rest import ApiException
class TestInstrument(unittest.TestCase):
"""Instrument unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test Instrument
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = ICA_SDK.models.instrument.Instrument() # noqa: E501
if include_optional :
return Instrument(
id = '0',
name = '0',
description = '0',
serial_number = '0',
control_software_version = '0',
operating_software_version = '0',
instrument_type = '0'
)
else :
return Instrument(
)
def testInstrument(self):
"""Test Instrument"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
82e143ab368a2df624d5ba0dd94ba697a8484a59 | 59e87634c67508bf7eba8c8b9845354aefa57bc7 | /DL/yolo/YOLOV1/py_cpu_nms.py | 2ddfa1b49d7699c559417a89c11064070d65ca39 | [] | no_license | Caohengrui/MLAndDL | 48729b94b2232e628b699cf8d0d4a6c6e81a36f5 | d0637f58f45e9c091cd90bbfe9c207223d0994f3 | refs/heads/master | 2023-03-16T01:06:03.316463 | 2020-04-14T07:44:15 | 2020-04-14T07:44:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import numpy as np
def py_cpu_nms(dets, scores, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
# scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
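# Minimal usage sketch (illustrative, made-up boxes in [x1, y1, x2, y2] form):
if __name__ == '__main__':
    dets = np.array([[10., 10., 50., 50.],
                     [12., 12., 52., 52.],
                     [100., 100., 150., 150.]])
    scores = np.array([0.9, 0.8, 0.7])
    print(py_cpu_nms(dets, scores, thresh=0.5))  # box 1 overlaps box 0 -> [0, 2]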
| [
"[email protected]"
] | |
95ea3a56c120bb0d2c831b76684d982b54b6c5aa | 68bad4b3d92872bb5b77b4ee503e588d20511a27 | /python/core/test_scripts_MPI/my_population_collect_spikes_mpi.py | ff1caea364466e952b5219ea999cbb2671552f87 | [] | no_license | mickelindahl/bgmodel | 647be626a7311a8f08f3dfc897c6dd4466fc0a92 | 78e6f2b73bbcbecd0dba25caf99f835313c914ee | refs/heads/master | 2023-08-29T13:57:04.122115 | 2022-02-11T14:28:23 | 2022-02-11T14:28:23 | 17,148,386 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | '''
Created on Sep 22, 2014
@author: mikael
'''
import numpy
import pickle
import sys
from toolbox.data_to_disk import mkdir
from toolbox.my_nest import collect_spikes_mpi
from toolbox.parallelization import comm
print sys.argv
fileName, =sys.argv[1:]
fileName+='data'
s,e=numpy.ones(2)*comm.rank(),numpy.ones(2)*comm.rank()+1
s, e = collect_spikes_mpi(s, e)
mkdir('/'.join(fileName.split('/')[0:-1]))
if comm.rank()==0:
print 'File name'
print fileName
if 4<len(fileName) and fileName[-4:]!='.pkl':
fileName=fileName+'.pkl'
f=open(fileName, 'wb') #open in binary mode
pickle.dump([s,e], f, -1)
f.close()
| [
"[email protected]"
] | |
c64bb122fa1b142b05e8315ac85b8ea4cec85786 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /gaussiana/ch3_2019_03_08_14_00_41_432668.py | 4bdc1e00e92765b8d5b29e95dceff6a7256f3781 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | import math
def calcula_gaussiana(x, mi, sigma):
if (sigma == 1 and x == 0 and mi == 0):
return 0
if (sigma == 0 or sigma == - math.sqrt(2*math.pi) or sigma == 1/math.sqrt(2*math.pi)):
return 0
    return (1/(sigma*math.sqrt(2*math.pi))) * math.exp(-0.5*((x - mi)/sigma)**2) | [
"[email protected]"
] | |
278be94dc86a4923595fc1db156514e63a55f1c3 | 9abc2f4fbf1b31b5a56507437b4a8d9c3f3db7e6 | /deals/migrations/0001_initial.py | 4a8460e46f0e4b39cc2b66694382c60ac4a670ac | [] | no_license | odbalogun/ticketr | e9fe8461d66dabe395f0e1af8fbecc67dbb16e97 | 94f24c82f407f861f1614a151feb3fdd62b283e5 | refs/heads/master | 2022-11-30T22:40:30.931160 | 2019-08-09T14:34:38 | 2019-08-09T14:34:38 | 188,833,600 | 0 | 0 | null | 2022-11-22T03:50:30 | 2019-05-27T11:50:07 | Python | UTF-8 | Python | false | false | 3,263 | py | # Generated by Django 2.2.1 on 2019-05-06 23:17
import deals.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Categories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deleted', models.DateTimeField(editable=False, null=True)),
('name', models.CharField(max_length=100, unique=True, verbose_name='name')),
('slug', models.SlugField(max_length=100, unique=True, verbose_name='slug')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='DealCategories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deleted', models.DateTimeField(editable=False, null=True)),
('price', models.FloatField(verbose_name='price')),
('description', models.TextField(verbose_name='description')),
('image', models.ImageField(upload_to=deals.models.deals_image_path, verbose_name='image')),
('quantity', models.IntegerField(null=True, verbose_name='quantity')),
('available_quantity', models.IntegerField(null=True, verbose_name='available quantity')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deals.Categories')),
],
),
migrations.CreateModel(
name='Deals',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deleted', models.DateTimeField(editable=False, null=True)),
('name', models.CharField(max_length=100, unique=True, verbose_name='name')),
('slug', models.SlugField(max_length=100, unique=True, verbose_name='slug')),
('description', models.TextField(verbose_name='description')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('expiry_date', models.DateField(null=True, verbose_name='expiry date')),
('is_active', models.BooleanField(default=True, verbose_name='is active')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('options', models.ManyToManyField(to='deals.DealCategories')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='dealcategories',
name='deal',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deals.Deals'),
),
migrations.AlterUniqueTogether(
name='dealcategories',
unique_together={('category', 'deal')},
),
]
| [
"[email protected]"
] | |
34c06dc74f45348f0075ae426c9ad58a2c008486 | 9bdc2e9f0382bd96ef3af4f9eca94fa58c5a4dc1 | /keras/mnist-privacy/model/pipeline_train.py | 0687d543d7075f6d1210e6bc5a96b7c003608086 | [
"Apache-2.0"
] | permissive | shicongisme/models | 90cf9a84b47c8d2a4de51fdfb7f6c4b9f796e317 | d8df07877aa8b10ce9b84983bb440af75e84dca7 | refs/heads/master | 2022-02-01T12:01:11.443827 | 2019-05-26T22:25:04 | 2019-05-26T22:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,112 | py | # Copyright 2018, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training a CNN on MNIST with differentially private SGD optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from privacy.analysis.rdp_accountant import compute_rdp
from privacy.analysis.rdp_accountant import get_privacy_spent
from privacy.optimizers import dp_optimizer
tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False,'
                        ' train with vanilla SGD.')
tf.flags.DEFINE_float('learning_rate', 0.08, 'Learning rate for training')
tf.flags.DEFINE_float('noise_multiplier', 1.12,
'Ratio of the standard deviation to the clipping norm')
tf.flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
tf.flags.DEFINE_integer('batch_size', 32, 'Batch size')
tf.flags.DEFINE_integer('epochs', 1, 'Number of epochs')
tf.flags.DEFINE_integer('microbatches', 32,
                        'Number of microbatches (must evenly divide batch_size)')
tf.flags.DEFINE_string('model_dir', None, 'Model directory')
tf.flags.DEFINE_string('export_dir', './pipeline_tfserving/0', 'Export dir')
FLAGS = tf.flags.FLAGS
def cnn_model_fn(features, labels, mode):
"""Model function for a CNN."""
# Define CNN architecture using tf.keras.layers.
input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
y = tf.keras.layers.Conv2D(16, 8,
strides=2,
padding='same',
kernel_initializer='he_normal').apply(input_layer)
y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
y = tf.keras.layers.Conv2D(32, 4,
strides=2,
padding='valid',
kernel_initializer='he_normal').apply(y)
y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
y = tf.keras.layers.Flatten().apply(y)
y = tf.keras.layers.Dense(32, kernel_initializer='he_normal').apply(y)
logits = tf.keras.layers.Dense(10, kernel_initializer='he_normal').apply(y)
# Calculate loss as a vector (to support microbatches in DP-SGD).
vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
# Define mean of loss across minibatch (for reporting through tf.Estimator).
scalar_loss = tf.reduce_mean(vector_loss)
# Configure the training op (for TRAIN mode).
if mode == tf.estimator.ModeKeys.TRAIN:
if FLAGS.dpsgd:
# Use DP version of GradientDescentOptimizer. For illustration purposes,
# we do that here by calling make_optimizer_class() explicitly, though DP
# versions of standard optimizers are available in dp_optimizer.
dp_optimizer_class = dp_optimizer.make_optimizer_class(
tf.train.GradientDescentOptimizer)
optimizer = dp_optimizer_class(
learning_rate=FLAGS.learning_rate,
noise_multiplier=FLAGS.noise_multiplier,
l2_norm_clip=FLAGS.l2_norm_clip,
num_microbatches=FLAGS.microbatches)
opt_loss = vector_loss
else:
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=FLAGS.learning_rate)
opt_loss = scalar_loss
global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
# In the following, we pass the mean of the loss (scalar_loss) rather than
# the vector_loss because tf.estimator requires a scalar loss. This is only
# used for evaluation and debugging by tf.estimator. The actual loss being
# minimized is opt_loss defined above and passed to optimizer.minimize().
return tf.estimator.EstimatorSpec(mode=mode,
loss=scalar_loss,
train_op=train_op)
# Add evaluation metrics (for EVAL mode).
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'accuracy':
tf.metrics.accuracy(
labels=labels,
predictions=tf.argmax(input=logits, axis=1))
}
return tf.estimator.EstimatorSpec(mode=mode,
loss=scalar_loss,
eval_metric_ops=eval_metric_ops)
def load_mnist():
"""Loads MNIST and preprocesses to combine training and validation data."""
train, test = tf.keras.datasets.mnist.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = np.array(train_data, dtype=np.float32) / 255
test_data = np.array(test_data, dtype=np.float32) / 255
train_labels = np.array(train_labels, dtype=np.int32)
test_labels = np.array(test_labels, dtype=np.int32)
assert train_data.min() == 0.
assert train_data.max() == 1.
assert test_data.min() == 0.
assert test_data.max() == 1.
assert len(train_labels.shape) == 1
assert len(test_labels.shape) == 1
return train_data, train_labels, test_data, test_labels
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.batch_size % FLAGS.microbatches != 0:
    raise ValueError('Number of microbatches should evenly divide batch_size')
# Load training and test data.
train_data, train_labels, test_data, test_labels = load_mnist()
# Instantiate the tf.Estimator.
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
model_dir=FLAGS.model_dir)
# Create tf.Estimator input functions for the training and test data.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=FLAGS.batch_size,
num_epochs=FLAGS.epochs,
shuffle=True)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': test_data},
y=test_labels,
num_epochs=1,
shuffle=False)
# Define a function that computes privacy budget expended so far.
def compute_epsilon(steps):
"""Computes epsilon value for given hyperparameters."""
if FLAGS.noise_multiplier == 0.0:
return float('inf')
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
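    # sampling probability q of a given example per step (MNIST has 60000 training points)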
sampling_probability = FLAGS.batch_size / 60000
rdp = compute_rdp(q=sampling_probability,
noise_multiplier=FLAGS.noise_multiplier,
steps=steps,
orders=orders)
# Delta is set to 1e-5 because MNIST has 60000 training points.
return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
# Training loop.
steps_per_epoch = 60000 // FLAGS.batch_size
for epoch in range(1, FLAGS.epochs + 1):
# Train the model for one epoch.
mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)
# Evaluate the model and print results
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
test_accuracy = eval_results['accuracy']
print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))
# Compute the privacy budget expended so far.
if FLAGS.dpsgd:
eps = compute_epsilon(epoch * steps_per_epoch)
print('For delta=1e-5, the current epsilon is: %.2f' % eps)
else:
print('Trained with vanilla non-private SGD optimizer')
# Export the model
if FLAGS.export_dir is not None:
# [-1, 28, 28, 1]
image = tf.placeholder(tf.float32, [None, 28, 28])
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'x': image,
})
mnist_classifier.export_savedmodel(FLAGS.export_dir, input_fn)
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] | |
9eeb1275039275399a55eee9a0ae55d20a3de792 | 61a02aba5dde7c29ec65a87eb8a20af12d6c2b47 | /python basic/3118_최단경로2.py | ccce1ec174e94a7de53db843f1c74aeaad387cdd | [] | no_license | hksoftcorn/OfficialDocs | 0b4d0e2a71707e06ba7516e34ad176ee02726587 | cfd87d26efad484657f9493dead350cf0611a3e8 | refs/heads/master | 2023-06-30T07:09:33.641869 | 2021-07-31T14:35:28 | 2021-07-31T14:35:28 | 374,389,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | V, E = map(int, input().split())
G = [[] for _ in range(V + 1)]
for _ in range(E):
u, v, w = map(int, input().split())
G[u].append((v, w))
visited = [False] * (V + 1)
dist = [0xfffffff] * (V + 1)
dist[1] = 0
p = [0] * (V + 1)
for _ in range(V):
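    # pick the closest unvisited vertex by a linear scan (O(V) per step)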
u, min_key = 1, 0xfffffff
for i in range(1, V+1):
if not visited[i] and min_key > dist[i]:
u, min_key = i, dist[i]
visited[u] = True
for v, w in G[u]:
if not visited[v] and dist[v] > dist[u] + w:
dist[v] = dist[u] + w
p[v] = u
print(dist[V])
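# Alternative sketch for comparison (same graph variables G and V as above):
# heap-based Dijkstra runs in O(E log V) instead of the O(V^2) scans above.
import heapq
def dijkstra(src):
    d = [0xfffffff] * (V + 1)
    d[src] = 0
    pq = [(0, src)]
    while pq:
        du, u = heapq.heappop(pq)
        if du > d[u]:
            continue  # stale queue entry
        for v, w in G[u]:
            if d[v] > du + w:
                d[v] = du + w
                heapq.heappush(pq, (du + w, v))
    return d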
| [
"[email protected]"
] | |
6f4373a988fbcd023ca39c1755c9d361c3e7daff | 2fd14347b7f43864d8153bd1c6d79198302d21ea | /ex.002 root finding/nr_problem_case.py | 3d33bede021e71d689a6e8c5cd4a3b1edf781a2e | [] | no_license | family9od/ECAre | 0fe27ff290eaa702c754fedef8953260a67592fc | ea875ea14be9d99a5e4f2191382e6eedc702b557 | refs/heads/master | 2020-06-17T02:33:30.651909 | 2016-11-15T07:45:31 | 2016-11-15T07:45:31 | 75,047,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # -*- coding: utf8 -*-
# 2010112033 이상형 9/20
"""
Using the Newton-Raphson method, one of the root-finding methods for
single-variable equations, to find a root of some function g(x).
The example below is a case where the Newton-Raphson method struggles.
"""
# Import the rootfinding module, a collection of root-finding functions for single-variable equations
import rootfinding as rf
def g(x):
    # the function whose root we want to find
return x ** 3 - 2 * x + 2
def dgdx(x):
    # derivative of g(x) with respect to x
return 3.0 * x ** 2.0 - 2.0
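# Illustrative fallback (the bisect helper below is an assumed addition, not
# part of the rootfinding module): bisection always converges once a
# sign-changing bracket is known, and here g(-2) < 0 < g(0).
def bisect(f, lo, hi, tol=1e-10):
    # halve the bracket until it is narrower than tol
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        if f(lo) * f(mid) <= 0:
            hi = mid
        else:
            lo = mid
    return 0.5 * (lo + hi)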
if "__main__" == __name__:
    # Starting from the given initial value, look for x such that g(x) = 0.
    # This can take much longer than expected: from x0 = 0 the Newton
    # iterates cycle 0 -> 1 -> 0 (a classic 2-cycle), so plain Newton-Raphson
    # never converges here.
x_nr = rf.newton(g, dgdx, 0)
print('x = %g, f(%g) = %g' % (x_nr, x_nr, g(x_nr)))
| [
"CAD Client"
] | CAD Client |
9ae009652986c6b459f9d867a41a6f768070ebda | e28ce5cca66c56ee7446a46e18375430d0d404eb | /toys/12_go/gopy/go/location.py | 3b4f56c0d82834ba26f9afa924ca8d0bbcdfb3a8 | [
"MIT"
] | permissive | git4robot/PyKids | 4fb60c5b107527336d9e686a98988ba7a8354f31 | 866e45e13171322ad1892d604508cfee9f8086c8 | refs/heads/master | 2020-04-17T20:45:26.741363 | 2020-04-14T02:21:55 | 2020-04-14T02:21:55 | 166,919,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | from go.utils import bold
class LocationError(Exception):
pass
class Location(object):
TYPES = {
'black': bold('*'),
'white': bold('o'),
'empty': '.',
}
def __init__(self, type):
if type not in self.TYPES:
raise LocationError('Type must be one of the following: {0}'.format(
self.TYPES.keys(),
))
self._type = type
def __eq__(self, other):
return self._type == other._type
def __hash__(self):
return hash(self._type)
def __str__(self):
return self.TYPES[self._type]
def __repr__(self):
return self._type.title()
| [
"[email protected]"
] | |
ac07a53e15aef0bb493402d8d4c3712a747239bb | 3a6cbe6940b657ac6b608ce93d8d41ffeb6b9e65 | /rocon_python_comms/src/rocon_python_comms/service_pair_server.py | 8263080327cd5dc872bcd1d3fefb91715b3bd6bf | [] | no_license | robotics-in-concert/rocon_tools | cdfc4ccfc04b79262fb151640966a33bd0b5f498 | 1f182537b26e8622eefaf6737d3b3d18b1741ca6 | refs/heads/devel | 2021-01-17T01:58:12.163878 | 2018-02-06T15:20:29 | 2018-02-06T15:20:29 | 15,774,638 | 7 | 22 | null | 2017-08-16T06:39:47 | 2014-01-09T18:02:42 | Python | UTF-8 | Python | false | false | 6,195 | py | #
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_tools/license/LICENSE
#
##############################################################################
# Description
##############################################################################
"""
.. module:: service_pair_server
:platform: Unix
:synopsis: Server side api for communicating across a rocon service pair.
This module contains the server side api for communicating across a rocon
service pair. A `facade pattern`_ is used here to simplify the interaction with
the server side publisher and subscriber.
.. include:: weblinks.rst
----
"""
##############################################################################
# Imports
##############################################################################
import rospy
import threading
# Local imports
from .exceptions import ServicePairException
##############################################################################
# Server Class
##############################################################################
class ServicePairServer(object):
'''
The server side of a pubsub service pair. This class provides a simplified
api for handling requests/responses on the pubsub pair. There are two
modes of operation - 1) blocking and 2) threaded.
**Non-Threaded**
In the first, the users' callback function directly runs whenever an
incoming request is received. In this case, your callbacks should be
very minimal so that incoming requests don't get blocked and queued up.
.. code-block:: python
#!/usr/bin/env python
import rospy
from chatter.msg import ChatterRequest, ChatterResponse, ChatterPair
from rocon_python_comms import ServicePairServer
class ChatterServer(object):
def __init__(self):
self.server = ServicePairServer('chatter', self.callback, ChatterPair)
def callback(self, request_id, msg):
rospy.loginfo("Server : I heard %s" % msg.babble)
response = ChatterResponse()
response.reply = "I heard %s" % msg.babble
self.server.reply(request_id, response)
if __name__ == '__main__':
rospy.init_node('chatter_server', anonymous=True)
chatter_server = ChatterServer()
rospy.spin()
**Threaded**
In the second, we spawn a background thread and shunt the callback into this thread.
Just toggle the ``use_threads`` flag when constructing the server:
.. code-block:: python
self.server = ServicePairServer('chatter', self.callback, ChatterPair, use_threads=True)
'''
__slots__ = [
'_publisher',
'_subscriber',
'_callback',
'_use_threads',
#'_request_handlers', # initiate, track and execute requests with these { hex string ids : dic of RequestHandler objects (Blocking/NonBlocking) }
'ServicePairSpec',
'ServicePairRequest',
'ServicePairResponse',
]
##########################################################################
# Initialisation
##########################################################################
def __init__(self, name, callback, ServicePairSpec, use_threads=False, queue_size=5):
'''
:param str name: resource name of service pair (e.g. testies for pair topics testies/request, testies/response)
:param callback: function invoked when a request arrives
:param ServicePairSpec: the pair type (e.g. rocon_service_pair_msgs.msg.TestiesPair)
:param bool use_threads: put the callback function into a fresh background thread when a request arrives.
:param int queue_size: size of the queue to configure the publisher with.
'''
self._callback = callback
self._use_threads = use_threads
try:
p = ServicePairSpec()
self.ServicePairSpec = ServicePairSpec
"""Base message type for this pair."""
self.ServicePairRequest = type(p.pair_request)
"""Request msg type for this pair <ServicePairSpec>Request."""
self.ServicePairResponse = type(p.pair_response)
"""Response msg type for this pair <ServicePairSpec>Response."""
except AttributeError:
raise ServicePairException("Type is not an pair spec: %s" % str(ServicePairSpec))
self._subscriber = rospy.Subscriber(name + "/request", self.ServicePairRequest, self._internal_callback)
self._publisher = rospy.Publisher(name + "/response", self.ServicePairResponse, queue_size=queue_size)
##########################################################################
# Public Methods
##########################################################################
def reply(self, request_id, msg):
'''
Send a reply to a previously received request (identified by request_id). Use this
instead of writing directly to the publisher - just pass the content of the
response data and the id that was issued with the request.
:param uuid_msgs.UniqueID request_id: the request id to associate with this response.
:param ServiceResponse msg: the response
'''
pair_response = self.ServicePairResponse()
pair_response.id = request_id
pair_response.response = msg
self._publisher.publish(pair_response)
##########################################################################
# Callbacks
##########################################################################
def _internal_callback(self, msg):
'''
:param ServicePairRequest msg: message returned from the server (with pair id etc)
'''
# Check if it is a blocking call that has requested it.
if self._use_threads:
thread = threading.Thread(target=self._callback, args=(msg.id, msg.request))
thread.start()
else:
self._callback(msg.id, msg.request)
| [
"[email protected]"
] | |
e22da16a3630862721200de043c23202f838489d | e906fe8237e5b55b7bef1f7a87884c5924ccd8b1 | /contactmps/migrations/0024_committee.py | b8cb77c7198df94f7b6f8955173bff4743b0fb99 | [
"MIT"
] | permissive | OpenUpSA/contact-mps | ac9a88ef166769d6305e213f3d77191f385c962a | 63d7f86e1b6c9319a4d0344a6125cd22770f34c7 | refs/heads/master | 2022-12-11T07:22:20.942567 | 2020-01-15T13:11:59 | 2020-01-15T13:11:59 | 93,042,651 | 0 | 2 | MIT | 2022-12-08T02:08:08 | 2017-06-01T09:52:56 | JavaScript | UTF-8 | Python | false | false | 894 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-05-14 15:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contactmps', '0023_campaign_include_link_in_email'),
]
operations = [
migrations.CreateModel(
name='Committee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, unique=True)),
('slug', models.CharField(max_length=300, unique=True)),
('email_address', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
75204bbfc5d050883078af710ce97469e69c1335 | a089fab4b0e363ba48bff57b3948c32172570e8f | /home_connect_sdk/models/__init__.py | 311a2dad6bac50ae69888c78797c9a6745803fa0 | [] | no_license | jeroenvdwaal/home-connect-sdk | ed2e44a01b72d64d17d41af8400eb2e42792232c | 3c0ab6791bb0e9df95154f8f177d889ebef0c749 | refs/heads/master | 2022-04-23T01:20:32.621570 | 2020-04-26T09:40:16 | 2020-04-26T09:40:16 | 255,988,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,060 | py | # coding: utf-8
# flake8: noqa
"""
Home Connect API
This API provides access to home appliances enabled by Home Connect (https://home-connect.com). Through the API programs can be started and stopped, or home appliances configured and monitored. For instance, you can start a cotton program on a washer and get a notification when the cycle is complete. To get started with this web client, visit https://developer.home-connect.com and register an account. An application with a client ID for this API client will be automatically generated for you. In order to use this API in your own client, you need an OAuth 2 client implementing the authorization code grant flow (https://developer.home-connect.com/docs/authorization/flow). More details can be found here: https://www.rfc-editor.org/rfc/rfc6749.txt Authorization URL: https://api.home-connect.com/security/oauth/authorize Token URL: https://api.home-connect.com/security/oauth/token # noqa: E501
The version of the OpenAPI document: 1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from home_connect_sdk.models.active_program_not_set_error import ActiveProgramNotSetError
from home_connect_sdk.models.array_of_available_programs import ArrayOfAvailablePrograms
from home_connect_sdk.models.array_of_available_programs_data import ArrayOfAvailableProgramsData
from home_connect_sdk.models.array_of_available_programs_data_constraints import ArrayOfAvailableProgramsDataConstraints
from home_connect_sdk.models.array_of_available_programs_data_programs import ArrayOfAvailableProgramsDataPrograms
from home_connect_sdk.models.array_of_events import ArrayOfEvents
from home_connect_sdk.models.array_of_events_items import ArrayOfEventsItems
from home_connect_sdk.models.array_of_home_appliances import ArrayOfHomeAppliances
from home_connect_sdk.models.array_of_home_appliances_data import ArrayOfHomeAppliancesData
from home_connect_sdk.models.array_of_home_appliances_data_homeappliances import ArrayOfHomeAppliancesDataHomeappliances
from home_connect_sdk.models.array_of_images import ArrayOfImages
from home_connect_sdk.models.array_of_images_data import ArrayOfImagesData
from home_connect_sdk.models.array_of_images_data_images import ArrayOfImagesDataImages
from home_connect_sdk.models.array_of_options import ArrayOfOptions
from home_connect_sdk.models.array_of_options_data import ArrayOfOptionsData
from home_connect_sdk.models.array_of_programs import ArrayOfPrograms
from home_connect_sdk.models.array_of_programs_data import ArrayOfProgramsData
from home_connect_sdk.models.array_of_programs_data_constraints import ArrayOfProgramsDataConstraints
from home_connect_sdk.models.array_of_programs_data_programs import ArrayOfProgramsDataPrograms
from home_connect_sdk.models.array_of_settings import ArrayOfSettings
from home_connect_sdk.models.array_of_settings_data import ArrayOfSettingsData
from home_connect_sdk.models.array_of_settings_data_settings import ArrayOfSettingsDataSettings
from home_connect_sdk.models.array_of_status import ArrayOfStatus
from home_connect_sdk.models.array_of_status_data import ArrayOfStatusData
from home_connect_sdk.models.command import Command
from home_connect_sdk.models.command_data import CommandData
from home_connect_sdk.models.command_definition import CommandDefinition
from home_connect_sdk.models.command_definition_data import CommandDefinitionData
from home_connect_sdk.models.conflict import Conflict
from home_connect_sdk.models.conflict_error import ConflictError
from home_connect_sdk.models.forbidden_error import ForbiddenError
from home_connect_sdk.models.get_setting import GetSetting
from home_connect_sdk.models.get_setting_data import GetSettingData
from home_connect_sdk.models.get_setting_data_constraints import GetSettingDataConstraints
from home_connect_sdk.models.home_appliance import HomeAppliance
from home_connect_sdk.models.home_appliance_data import HomeApplianceData
from home_connect_sdk.models.interal_server_error import InteralServerError
from home_connect_sdk.models.no_program_active_error import NoProgramActiveError
from home_connect_sdk.models.no_program_selected_error import NoProgramSelectedError
from home_connect_sdk.models.not_acceptable_error import NotAcceptableError
from home_connect_sdk.models.not_found_error import NotFoundError
from home_connect_sdk.models.option import Option
from home_connect_sdk.models.option_data import OptionData
from home_connect_sdk.models.program import Program
from home_connect_sdk.models.program_data import ProgramData
from home_connect_sdk.models.program_data_options import ProgramDataOptions
from home_connect_sdk.models.program_definition import ProgramDefinition
from home_connect_sdk.models.program_definition_data import ProgramDefinitionData
from home_connect_sdk.models.program_definition_data_constraints import ProgramDefinitionDataConstraints
from home_connect_sdk.models.program_definition_data_options import ProgramDefinitionDataOptions
from home_connect_sdk.models.program_not_available_error import ProgramNotAvailableError
from home_connect_sdk.models.put_setting import PutSetting
from home_connect_sdk.models.put_setting_data import PutSettingData
from home_connect_sdk.models.put_setting_data_constraints import PutSettingDataConstraints
from home_connect_sdk.models.request_timeout_error import RequestTimeoutError
from home_connect_sdk.models.selected_program_not_set_error import SelectedProgramNotSetError
from home_connect_sdk.models.status import Status
from home_connect_sdk.models.status_data import StatusData
from home_connect_sdk.models.too_many_requests_error import TooManyRequestsError
from home_connect_sdk.models.unauthorized_error import UnauthorizedError
from home_connect_sdk.models.unauthorized_error_error import UnauthorizedErrorError
from home_connect_sdk.models.unsupported_media_type_error import UnsupportedMediaTypeError
from home_connect_sdk.models.wrong_operation_state_error import WrongOperationStateError
| [
"[email protected]"
] | |
3652f4d252652605a8f6ef2c32218b505955d203 | 627094b5e463bd113f626450eaceb01dfa4ff5d5 | /test/client/test_link_control.py | 0e4014b88ba456056e0e54eef493cfb4f701e752 | [
"MIT"
] | permissive | DaleChen0351/python-udsoncan | 49eefcb299e2a4fabe0bf168905cc86ef43d6f62 | c495e872c69c4ea05e3b477d2a1088cb83167a17 | refs/heads/master | 2020-04-20T06:10:25.252315 | 2019-03-28T07:38:17 | 2019-03-28T07:38:17 | 168,675,483 | 0 | 0 | MIT | 2019-03-28T07:38:19 | 2019-02-01T09:42:02 | Python | UTF-8 | Python | false | false | 6,890 | py | from udsoncan.client import Client
from udsoncan import services, Baudrate
from udsoncan.exceptions import *
from test.ClientServerTest import ClientServerTest
class TestLinkControl(ClientServerTest):
def __init__(self, *args, **kwargs):
ClientServerTest.__init__(self, *args, **kwargs)
def test_linkcontrol_verify_fixed(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x87\x01\x11")
self.conn.fromuserqueue.put(b"\xC7\x01") # Positive response
def _test_linkcontrol_verify_fixed(self):
baudrate = Baudrate(250000, baudtype=Baudrate.Type.Fixed)
response = self.udsclient.link_control(control_type=1, baudrate=baudrate)
self.assertTrue(response.valid)
self.assertTrue(response.positive)
self.assertEqual(response.service_data.control_type_echo, 1)
def test_linkcontrol_verify_fixed_spr(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x87\x81\x11")
self.conn.fromuserqueue.put("wait") # Synchronize
def _test_linkcontrol_verify_fixed_spr(self):
baudrate = Baudrate(250000, baudtype=Baudrate.Type.Fixed)
with self.udsclient.suppress_positive_response:
response = self.udsclient.link_control(control_type=1, baudrate=baudrate)
self.assertEqual(response, None)
self.conn.fromuserqueue.get(timeout=0.2) #Avoid closing connection prematurely
def test_linkcontrol_verify_fixed_from_specific(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x87\x01\x11")
self.conn.fromuserqueue.put(b"\xC7\x01") # Positive response
def _test_linkcontrol_verify_fixed_from_specific(self):
baudrate = Baudrate(250000, baudtype=Baudrate.Type.Specific)
response = self.udsclient.link_control(control_type=1, baudrate=baudrate)
self.assertTrue(response.valid)
self.assertTrue(response.positive)
self.assertEqual(response.service_data.control_type_echo, 1)
def test_linkcontrol_verify_specific(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x87\x02\x12\x34\x56")
self.conn.fromuserqueue.put(b"\xC7\x02") # Positive response
def _test_linkcontrol_verify_specific(self):
baudrate = Baudrate(0x123456, baudtype=Baudrate.Type.Specific)
response = self.udsclient.link_control(control_type=2, baudrate=baudrate)
self.assertTrue(response.valid)
self.assertTrue(response.positive)
self.assertEqual(response.service_data.control_type_echo, 2)
def test_linkcontrol_verify_specific_from_fixed(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x87\x02\x07\xA1\x20")
self.conn.fromuserqueue.put(b"\xC7\x02") # Positive response
def _test_linkcontrol_verify_specific_from_fixed(self):
baudrate = Baudrate(500000, baudtype=Baudrate.Type.Fixed)
response = self.udsclient.link_control(control_type=2, baudrate=baudrate)
self.assertTrue(response.valid)
self.assertTrue(response.positive)
def test_linkcontrol_custom_control_type(self):
request = self.conn.touserqueue.get(timeout=0.2)
self.assertEqual(request, b"\x87\x55")
self.conn.fromuserqueue.put(b"\xC7\x55") # Positive response
def _test_linkcontrol_custom_control_type(self):
response = self.udsclient.link_control(control_type=0x55)
self.assertTrue(response.valid)
self.assertTrue(response.positive)
def test_linkcontrol_negative_response_exception(self):
self.wait_request_and_respond(b"\x7F\x87\x31") # Request Out Of Range
def _test_linkcontrol_negative_response_exception(self):
with self.assertRaises(NegativeResponseException):
self.udsclient.link_control(control_type=0x55)
def test_linkcontrol_negative_response_no_exception(self):
self.wait_request_and_respond(b"\x7F\x87\x31") # Request Out Of Range
def _test_linkcontrol_negative_response_no_exception(self):
self.udsclient.config['exception_on_negative_response'] = False
response = self.udsclient.link_control(control_type=0x55)
self.assertTrue(response.valid)
self.assertFalse(response.positive)
def test_linkcontrol_invalidservice_exception(self):
self.wait_request_and_respond(b"\x00\x22") # Request Out Of Range
def _test_linkcontrol_invalidservice_exception(self):
with self.assertRaises(InvalidResponseException):
self.udsclient.link_control(control_type=0x55)
def test_linkcontrol_invalidservice_no_exception(self):
self.wait_request_and_respond(b"\x00\x22") # Request Out Of Range
def _test_linkcontrol_invalidservice_no_exception(self):
self.udsclient.config['exception_on_invalid_response'] = False
response = self.udsclient.link_control(control_type=0x55)
self.assertFalse(response.valid)
def test_linkcontrol_wrongservice_exception(self):
self.wait_request_and_respond(b"\x7E\x22") # Valid but wrong service (Tester Present)
def _test_linkcontrol_wrongservice_exception(self):
with self.assertRaises(UnexpectedResponseException):
self.udsclient.link_control(control_type=0x55)
def test_linkcontrol_wrongservice_no_exception(self):
self.wait_request_and_respond(b"\x7E\x22") # Valid but wrong service (Tester Present)
def _test_linkcontrol_wrongservice_no_exception(self):
self.udsclient.config['exception_on_unexpected_response'] = False
response = self.udsclient.link_control(control_type=0x55)
self.assertTrue(response.valid)
self.assertTrue(response.unexpected)
def test_linkcontrol_bad_control_type_exception(self):
self.wait_request_and_respond(b"\xC7\x08") # Valid but bad control type
def _test_linkcontrol_bad_control_type_exception(self):
with self.assertRaises(UnexpectedResponseException):
self.udsclient.link_control(control_type=0x55)
def test_linkcontrol_bad_control_type_no_exception(self):
self.wait_request_and_respond(b"\xC7\x08") # Valid but bad control type
def _test_linkcontrol_bad_control_type_no_exception(self):
self.udsclient.config['exception_on_unexpected_response'] = False
response = self.udsclient.link_control(control_type=0x55)
self.assertTrue(response.valid)
self.assertTrue(response.unexpected)
def test_bad_param(self):
pass
def _test_bad_param(self):
with self.assertRaises(ValueError):
self.udsclient.link_control(control_type='x')
with self.assertRaises(ValueError):
self.udsclient.link_control(control_type=0x80)
with self.assertRaises(ValueError):
self.udsclient.link_control(control_type=1) # Missing Baudrate
with self.assertRaises(ValueError):
self.udsclient.link_control(control_type=2) # Missing Baudrate
with self.assertRaises(ValueError):
self.udsclient.link_control(control_type=0, baudrate=Baudrate(500000)) # Baudrate is not needed
with self.assertRaises(ValueError):
self.udsclient.link_control(control_type=1, baudrate=1) # Baudrate should be Baudrate instance
with self.assertRaises(ValueError):
self.udsclient.link_control(control_type=1, baudrate='x') # Baudrate should be Baudrate instance
| [
"[email protected]"
] | |
3e865ff8ba54efeccf0945858bdb43e9be54a743 | 837762524db70b805fbf46f62a14be32e32dabd9 | /scripts/train.py | df35cdfb4e3c068ebba443e31d700f6c49358b2b | [
"Apache-2.0"
] | permissive | jordancaraballo/nga-deep-learning | 832e54afb978a84875d1c09a7c00055e698f2a7b | 752266ccc06efacdef2423214998ecfced7eafb7 | refs/heads/master | 2023-06-30T14:39:49.448265 | 2021-07-27T20:00:52 | 2021-07-27T20:00:52 | 343,627,410 | 23 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,992 | py | # --------------------------------------------------------------------------
# Preprocessing and dataset creation from NGA data. This assumes you provide
# a configuration file with required parameters and files.
# --------------------------------------------------------------------------
import os # system modifications
import sys # system modifications
import time # tracking time
import numpy as np # for arrays modifications
import cupy as cp # for arrays modifications
import tensorflow as tf # deep learning framework
from core.unet import unet_batchnorm # unet network to work with
from core.utils import get_training_dataset # getting training dataset
from core.utils import get_tensorslices # getting tensor slices
from core.utils import gen_callbacks # generate callbacks
# tensorflow imports
# from tensorflow.keras.mixed_precision import experimental as mixed_precision
from tensorflow.keras import mixed_precision
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import Adadelta
# define configuration object
from config import Config
config = Config.Configuration()
__author__ = "Jordan A Caraballo-Vega, Science Data Processing Branch"
__email__ = "[email protected]"
__status__ = "Development"
# Define some environment variables to help refining randomness.
# Note: there might still be some randomness since most of the code
# is ran on GPU and sometimes parallelization brings changes.
np.random.seed(config.SEED)
tf.random.set_seed(config.SEED)
cp.random.seed(config.SEED)
print(f"Tensorflow ver. {tf.__version__}")
# verify GPU devices are available and ready
os.environ['CUDA_VISIBLE_DEVICES'] = config.CUDA
devices = tf.config.list_physical_devices('GPU')
assert len(devices) != 0, "No GPU devices found."
# ------------------------------------------------------------------
# System Configurations
# ------------------------------------------------------------------
if config.MIRROR_STRATEGY:
strategy = tf.distribute.MirroredStrategy()
print('Multi-GPU enabled')
if config.MIXED_PRECISION:
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_global_policy(policy)
print('Mixed precision enabled')
if config.XLA_ACCELERATE:
tf.config.optimizer.set_jit(True)
print('Accelerated Linear Algebra enabled')
# Disable AutoShard, data lives in memory, use in memory options
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = \
tf.data.experimental.AutoShardPolicy.OFF
# ---------------------------------------------------------------------------
# script train.py
# ---------------------------------------------------------------------------
def main():
# Main function to collect configuration file and run the script
print(f'GPU REPLICAS: {strategy.num_replicas_in_sync}')
t0 = time.time()
print(f'Train dir: {config.TRAIN_DATADIR}')
print(f'Validation dir: {config.VAL_DATADIR}')
# Initialize Callbacks
callbacks = gen_callbacks(config, config.CALLBACKS_METADATA)
# open files and get dataset tensor slices
train_images, train_labels = get_tensorslices(
data_dir=config.TRAIN_DATADIR, img_id='x', label_id='y'
)
# open files and get dataset tensor slices
val_images, val_labels = get_tensorslices(
data_dir=config.VAL_DATADIR, img_id='x', label_id='y'
)
# extract values for training
NUM_TRAINING_IMAGES = train_images.shape[0]
NUM_VALIDATION_IMAGES = val_images.shape[0]
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // config.BATCH_SIZE
print(f'{NUM_TRAINING_IMAGES} training images')
print(f'{NUM_VALIDATION_IMAGES} validation images')
# generate training dataset
train_dataset = \
tf.data.Dataset.from_tensor_slices((train_images, train_labels))
# generate validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((val_images, val_labels))
val_dataset = val_dataset.batch(config.VAL_BATCH_SIZE)
# Create model output directory
os.system(f'mkdir -p {config.MODEL_SAVEDIR}')
# Initialize and compile model
with strategy.scope():
# initialize UNet model
model = unet_batchnorm(
nclass=config.N_CLASSES, input_size=config.INPUT_SIZE,
maps=config.MODEL_METADATA['network_maps']
)
        # initialize the optimizer; exit if it is not a supported optimizer
        # (note: Keras renamed `lr` to `learning_rate` in TF2; `lr` still
        # works here as a deprecated alias)
        if config.MODEL_METADATA['optimizer_name'] == 'Adadelta':
            optimizer = Adadelta(lr=config.MODEL_METADATA['lr'])
        elif config.MODEL_METADATA['optimizer_name'] == 'Adam':
            optimizer = Adam(lr=config.MODEL_METADATA['lr'])
else:
sys.exit('Optimizer provided is not supported.')
# enabling mixed precision to avoid underflow
optimizer = mixed_precision.LossScaleOptimizer(optimizer)
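        # (why: LossScaleOptimizer multiplies the loss by a dynamic scale
        # factor before backprop and unscales the gradients afterwards, so
        # small float16 gradients do not flush to zero)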
# compile model to start training
model.compile(
optimizer,
loss=config.MODEL_METADATA['loss'],
metrics=config.MODEL_METADATA['metrics']
)
model.summary()
# Disable AutoShard, data lives in memory, use in memory options
train_dataset = train_dataset.with_options(options)
val_dataset = val_dataset.with_options(options)
# Train the model and save to disk
model.fit(
get_training_dataset(
train_dataset,
config,
do_aug=config.MODEL_METADATA['do_aug']
),
initial_epoch=config.START_EPOCH,
epochs=config.N_EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=val_dataset,
callbacks=callbacks,
verbose=2
)
print(f'Execution time: {time.time() - t0}')
# -------------------------------------------------------------------------------
# main
# -------------------------------------------------------------------------------
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
72111d9069d5463f365998e1f2428329f7f7f195 | a79ab025913ba5a96b11bd506d9915f4533f4029 | /golfProj/golf_app/templatetags/golf_extras.py | 0dd0f7aa73defb8cb95847a41ca1632adbae8a5b | [] | no_license | jflynn87/golf_game | 2533548b1b8313661216446ddfa7927b63717118 | a24f710fbc39d25cc93b5b4c5c4d6575ef38c6bb | refs/heads/master | 2022-07-11T00:27:46.765936 | 2019-04-17T03:07:45 | 2019-04-17T03:07:45 | 174,344,305 | 0 | 0 | null | 2022-07-06T20:01:36 | 2019-03-07T12:57:45 | Python | UTF-8 | Python | false | false | 1,733 | py | from django import template
from golf_app.models import Picks, mpScores, Field, Tournament, Group
from django.db.models import Count
register = template.Library()
@register.filter
def model_name(obj):
return obj._meta.verbose_name
@register.filter
def currency(dollars):
dollars = int(dollars)
return '$' + str(dollars)
@register.filter
def line_break(count):
user_cnt = Picks.objects.filter(playerName__tournament__current=True).values('playerName__tournament').annotate(Count('user', distinct=True))
if (count -1) % (user_cnt[0].get('user__count')) == 0 or count == 0:
return True
else:
return False
@register.filter
def first_round(pick):
field = Field.objects.get(tournament__pga_tournament_num='470', playerName=pick)
wins = mpScores.objects.filter(player=field, round__lt=4, result="Yes").count()
losses = mpScores.objects.filter(player=field, round__lt=4, result="No").exclude(score="AS").count()
ties = mpScores.objects.filter(player=field, round__lt=4, score="AS").count()
return str(wins) + '-' + str(losses) + '-' + str(ties)
@register.filter
def leader(group):
#print ('group', group)
tournament = Tournament.objects.get(pga_tournament_num="470")
grp = Group.objects.get(tournament=tournament,number=group)
field = Field.objects.filter(tournament=tournament, group=grp)
    golfer_dict = {}
    for golfer in field:
        # Parse the "W-L-T" string by character position; this assumes
        # single-digit win and tie counts (index 0 = wins, index 4 = ties)
        golfer_dict[golfer.playerName] = int(first_round(golfer.playerName)[0]) + (.5*int(first_round(golfer.playerName)[4]))
#print ('leader', [k for k, v in golfer_dict.items() if v == max(golfer_dict.values())])
winner= [k for k, v in golfer_dict.items() if v == max(golfer_dict.values())]
return winner
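# Hedged usage sketch (the template variables below are hypothetical; only the
# filter names come from this module). After `{% load golf_extras %}`:
#   {{ pick.points|currency }}        -> "$1200"
#   {{ forloop.counter0|line_break }} -> True when a row break is due
#   {{ group.number|leader }}         -> list of golfers leading that group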
| [
"[email protected]"
] | |
43897fd79e93876b6bb01c316ff69f8ac715aa83 | 4de0c6d3a820d7669fcef5fd035416cf85b35f23 | /ITcoach/爬虫课件/第三章:数据解析/6.xpath解析案例-58二手房.py | d01d163b95860803cf0863b3b681c3a5e230439b | [
"AFL-3.0"
] | permissive | ww35133634/chenxusheng | 5e1b7391a94387b73bcd7c4d12f1247b79be8016 | 666e0eb3aedde46342faf0d4030f5c72b10c9732 | refs/heads/master | 2022-11-12T03:46:47.953680 | 2020-07-02T20:50:56 | 2020-07-02T20:50:56 | 275,168,080 | 0 | 0 | AFL-3.0 | 2020-07-02T20:58:37 | 2020-06-26T13:54:48 | HTML | UTF-8 | Python | false | false | 800 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
from lxml import etree
#Goal: scrape house listings from 58.com's second-hand housing page
if __name__ == "__main__":
headers = {
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}
    #fetch the page source
url = 'https://bj.58.com/ershoufang/'
page_text = requests.get(url=url,headers=headers).text
    #parse the page
    tree = etree.HTML(page_text)
    #li_list holds the matched <li> tag element objects
li_list = tree.xpath('//ul[@class="house-list-wrap"]/li')
fp = open('58.txt','w',encoding='utf-8')
for li in li_list:
#局部解析
title = li.xpath('./div[2]/h2/a/text()')[0]
print(title)
fp.write(title+'\n')
| [
"[email protected]"
] | |
04e63b41a7f0e2b684daa0deadb5d48becf59923 | 8fd2e5d53d7a91d35288ccefdb0c7ef00d927a0a | /book_06_Python黑帽子/Chapter03/网口嗅探多网段版(Bug).py | a690d64efc8c84b89fe615b495c918e4ec44349e | [] | no_license | atlasmao/Python-book-code | 03501f9ca2e81bc1f47464b3227c7f9cda0d387c | 03b6848a15a7e4c2ffebdc3528c24a8b101d9f41 | refs/heads/master | 2022-01-06T23:45:21.951307 | 2019-07-11T10:32:22 | 2019-07-11T10:32:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,745 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import socket
import os
import struct
import threading
import time
from netaddr import IPNetwork, IPAddress
from ctypes import *
# host to listen on
host = '10.0.76.1'
# base target subnet to scan (the third octet is swept below)
subnet = '10.0.10.0/24'
subnet_list = []
host_up_num = 0
# custom payload string; we check for it in the ICMP responses
magic_message = "PYTHONRULES!"
# build the list of /24 subnets by sweeping the third octet
def add_subnet(subnet):
temp_list = subnet.split(".")
for i in range(256):
temp_list[2] = str(i)
subnet_list.append(".".join(temp_list))
# send UDP datagrams to every host in the subnet
def udp_sender(subnet, magic_message):
    time.sleep(5)
    sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    for ip in IPNetwork(subnet):
        try:
            sender.sendto(magic_message, ("{}".format(ip), 65212))
        except:
            pass
    # close once, after the sweep: the original closed the socket inside the
    # loop's finally block, which killed it after the first datagram
    sender.close()
# IP header definition
class IP(Structure):
_fields_ = [
("ihl", c_ubyte, 4),
("version", c_ubyte, 4),
("tos", c_ubyte),
("len", c_ushort),
("id", c_ushort),
("offset", c_ushort),
("ttl", c_ubyte),
("protocol_num", c_ubyte),
("sum", c_ushort),
# ("src", c_ulong),
("src", c_uint32),
# ("dst", c_ulong)
("dst", c_uint32)
]
def __new__(self, socket_buffer=None):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer=None):
        # map protocol numbers to protocol names
        self.protocol_map = {1: "ICMP", 6: "TCP", 17: "UDP"}
        # human-readable IP addresses
        # self.src_address = socket.inet_ntoa(struct.pack("<L", self.src))
        self.src_address = socket.inet_ntoa(struct.pack("@I", self.src))
        # self.dst_address = socket.inet_ntoa(struct.pack("<L", self.dst))
        self.dst_address = socket.inet_ntoa(struct.pack("@I", self.dst))
        # protocol type
try:
self.protocol = self.protocol_map[self.protocol_num]
except:
self.protocol = str(self.protocol_num)
class ICMP(Structure):
_fields_ = [
("type", c_ubyte),
("code", c_ubyte),
("checksum", c_ushort),
("unused", c_ushort),
("next_hop_mtu", c_ushort)
]
def __new__(self, socket_buffer):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer):
pass
# create a raw socket and bind it to the public interface
if os.name == "nt":
socket_protocol = socket.IPPROTO_IP
else:
socket_protocol = socket.IPPROTO_ICMP
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
sniffer.bind((host, 0))
# include the IP header in captured packets
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# on Windows we need to set an IOCTL to enable promiscuous mode
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
add_subnet(subnet)
for new_subnet in subnet_list:
print(new_subnet)
    # start a sender thread for this subnet
t = threading.Thread(target=udp_sender, args=(new_subnet, magic_message))
t.start()
try:
    while True:
        # read a packet
        raw_buffer = sniffer.recvfrom(65565)[0]
        # parse the first 20 bytes of the buffer as the IP header
        ip_header = IP(raw_buffer[0:20])
        # TODO: can be toggled on or off
        # print the protocol and both endpoint IP addresses
        # print "Protocol: {} {} -> {}".format(ip_header.protocol, ip_header.src_address, ip_header.dst_address)
        # if it is ICMP, process it
        if ip_header.protocol == "ICMP":
            # compute the offset where the ICMP packet starts
            offset = ip_header.ihl * 4
            buf = raw_buffer[offset:offset + sizeof(ICMP)]
            # parse the ICMP data
            icmp_header = ICMP(buf)
            # print "ICMP -> Type: {} Code: {}".format(icmp_header.type, icmp_header.code)
            # check that both type and code are 3 (destination/port unreachable)
            if icmp_header.code == 3 and icmp_header.type == 3:
                # confirm the responding host is inside one of our target
                # subnets (checking only the base subnet was the multi-subnet bug)
                if any(IPAddress(ip_header.src_address) in IPNetwork(net)
                       for net in subnet_list):
                    # confirm the ICMP payload ends with our custom string
                    if raw_buffer[len(raw_buffer) - len(magic_message):] == magic_message:
                        print("Host Up: {}".format(ip_header.src_address))
                        host_up_num += 1
                        print("Host Up Number: {}".format(host_up_num))
# handle CTRL-C
except KeyboardInterrupt:
    # if running on Windows, turn promiscuous mode back off
    if os.name == "nt":
        sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
| [
"[email protected]"
] | |
6a05561304bd78df0efc71b62b3659469610fd24 | f38193df76e7f86ad4017ec62dd7c90ce92e9b91 | /_src/om2py3w/3wex0/diary-server.py | 20b7585eed6f3dee62bf2c7213284c338396cdce | [
"MIT"
] | permissive | isynch/OMOOC2py | dcf54f9d2012d018c3b280d28d65058e6ae1dc08 | cc7fafc106b56553306988d07f0a4ab61bc39201 | refs/heads/master | 2020-04-25T23:30:15.410512 | 2015-12-05T07:31:56 | 2015-12-05T07:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | # -*- coding: utf-8 -*-
from datetime import datetime
import socket
import sys
HOST = '' # Symbolic name meaning all available interfaces
PORT = 1234 # Arbitrary non-privileged port
# Datagram(udp) socket
try:
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    print 'Creating socket...'
except socket.error, msg:
print 'Failed to create socket. Error Code : '+str(msg[0])+' Message ' +msg[1]
sys.exit()
# Bind socket to local host and port
try:
s.bind((HOST,PORT))
except socket.error, msg:
print 'Bind failed. Error Code: '+str(msg[0])+' Message '+msg[1]
sys.exit()
print 'Listening...'
#now keep talking with the client
while 1:
# receive data from client(data, addr)
d=s.recvfrom(1024)
data=d[0]
addr=d[1]
if not data:
break
today=datetime.now()
diary=data.strip()
print diary
diaryFile = open('diary.txt','a')
diaryFile.write('\n'+today.strftime("%y/%m/%d")+' client['+str(addr[1])+'] '+ diary)
diaryFile.close()
diaryFile = open('diary.txt')
diary = diaryFile.read()
    print('============ Diary ============')
print(diary)
    reply='Recorded it for you. Diary entry: '+data
s.sendto(reply,addr)
s.close()
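# Minimal matching client, a sketch under this file's own assumptions
# (UDP on port 1234; '127.0.0.1' is a placeholder for the server host):
#
# import socket
# c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# c.sendto('wrote some code today', ('127.0.0.1', 1234))
# print c.recvfrom(1024)[0]  # server echoes back the recorded entry
# c.close()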
| [
"[email protected]"
] | |
49f7dbbdfffd887a721bcc1a2ee1ced7e8de18d3 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/netvisor/pn_cpu_class.py | fadbed03e41b7d154a3530d1d8ce9f13d78ed446 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later",
"CC0-1.0",
"GPL-1.0-or-later"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 5,894 | py | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pn_cpu_class
author: "Pluribus Networks (@rajaspachipulusu17)"
short_description: CLI command to create/modify/delete cpu-class
description:
- This module can be used to create, modify and delete CPU class information.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(present) to create cpu-class and
C(absent) to delete cpu-class C(update) to modify the cpu-class.
required: True
type: str
choices: ['present', 'absent', 'update']
pn_scope:
description:
- scope for CPU class.
required: false
choices: ['local', 'fabric']
pn_hog_protect:
description:
- enable host-based hog protection.
required: False
type: str
choices: ['disable', 'enable', 'enable-and-drop']
pn_rate_limit:
description:
- rate-limit for CPU class.
required: False
type: str
pn_name:
description:
- name for the CPU class.
required: False
type: str
'''
EXAMPLES = """
- name: create cpu class
pn_cpu_class:
pn_cliswitch: 'sw01'
state: 'present'
pn_name: 'icmp'
pn_rate_limit: '1000'
pn_scope: 'local'
- name: delete cpu class
pn_cpu_class:
pn_cliswitch: 'sw01'
state: 'absent'
pn_name: 'icmp'
- name: modify cpu class
pn_cpu_class:
pn_cliswitch: 'sw01'
state: 'update'
pn_name: 'icmp'
pn_rate_limit: '2000'
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the cpu-class command.
returned: always
type: list
stderr:
description: set of error responses from the cpu-class command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
"""
This method checks for idempotency using the cpu-class-show command.
If a user with given name exists, return True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_name']
clicopy = cli
cli += ' system-settings-show format cpu-class-enable no-show-headers'
out = run_commands(module, cli)[1]
out = out.split()
if 'on' not in out:
module.fail_json(
failed=True,
msg='Enable CPU class before creating or deleting'
)
cli = clicopy
cli += ' cpu-class-show format name no-show-headers'
out = run_commands(module, cli)[1]
if out:
out = out.split()
return True if name in out else False
def main():
""" This section is for arguments parsing """
state_map = dict(
present='cpu-class-create',
absent='cpu-class-delete',
update='cpu-class-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_scope=dict(required=False, type='str',
choices=['local', 'fabric']),
pn_hog_protect=dict(required=False, type='str',
choices=['disable', 'enable',
'enable-and-drop']),
pn_rate_limit=dict(required=False, type='str'),
pn_name=dict(required=False, type='str'),
),
required_if=(
['state', 'present', ['pn_name', 'pn_scope', 'pn_rate_limit']],
['state', 'absent', ['pn_name']],
['state', 'update', ['pn_name']],
)
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
scope = module.params['pn_scope']
hog_protect = module.params['pn_hog_protect']
rate_limit = module.params['pn_rate_limit']
name = module.params['pn_name']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
NAME_EXISTS = check_cli(module, cli)
cli += ' %s name %s ' % (command, name)
if command == 'cpu-class-modify':
if NAME_EXISTS is False:
module.fail_json(
failed=True,
msg='cpu class with name %s does not exist' % name
)
if command == 'cpu-class-delete':
if NAME_EXISTS is False:
module.exit_json(
skipped=True,
msg='cpu class with name %s does not exist' % name
)
if command == 'cpu-class-create':
if NAME_EXISTS is True:
module.exit_json(
skipped=True,
msg='cpu class with name %s already exists' % name
)
if scope:
cli += ' scope %s ' % scope
if command != 'cpu-class-delete':
if hog_protect:
cli += ' hog-protect %s ' % hog_protect
if rate_limit:
cli += ' rate-limit %s ' % rate_limit
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
dfeaef8960d9c3c78351dc377c9805836cc90639 | 69cfe57220f789eb1d1966ed22c6823f0beeb8ce | /covid_venv/lib/python3.7/site-packages/dash_html_components/Pre.py | 46214932ac801cfab78d16fc03ee6f01f0cbd582 | [
"MIT"
] | permissive | paulsavala/Covid19-model | 664e31780ee1c8e4ef2115af2f41b27e832e5e50 | 41aa96d7c9abc117550f904af11815f507f0f0a0 | refs/heads/master | 2022-07-15T17:39:05.842619 | 2020-11-16T20:42:22 | 2020-11-16T20:42:22 | 252,545,888 | 2 | 0 | MIT | 2022-06-22T01:37:35 | 2020-04-02T19:19:25 | Python | UTF-8 | Python | false | false | 4,767 | py | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Pre(Component):
"""A Pre component.
Pre is a wrapper for the <pre> HTML5 element.
For detailed attribute info see:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/pre
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- n_clicks (number; default 0): An integer that represents the number of times
that this element has been clicked on.
- n_clicks_timestamp (number; default -1): An integer that represents the time (in ms since 1970)
at which n_clicks changed. This can be used to tell
which button was changed most recently.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- role (string; optional): The ARIA role attribute
- data-* (string; optional): A wildcard data attribute
- aria-* (string; optional): A wildcard aria attribute
- accessKey (string; optional): Keyboard shortcut to activate or add focus to the element.
- className (string; optional): Often used with CSS to style elements with common properties.
- contentEditable (string; optional): Indicates whether the element's content is editable.
- contextMenu (string; optional): Defines the ID of a <menu> element which will serve as the element's context menu.
- dir (string; optional): Defines the text direction. Allowed values are ltr (Left-To-Right) or rtl (Right-To-Left)
- draggable (string; optional): Defines whether the element can be dragged.
- hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional): Prevents rendering of given element, while keeping child elements, e.g. script elements, active.
- lang (string; optional): Defines the language used in the element.
- spellCheck (string; optional): Indicates whether spell checking is allowed for the element.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- tabIndex (string; optional): Overrides the browser's default tab order and follows the one specified instead.
- title (string; optional): Text to be displayed in a tooltip when hovering over the element.
- loading_state (dict; optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
Those keys have the following types:
- is_loading (boolean; optional): Determines if the component is loading or not
- prop_name (string; optional): Holds which property is loading
- component_name (string; optional): Holds the name of the component that is loading"""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, key=Component.UNDEFINED, role=Component.UNDEFINED, accessKey=Component.UNDEFINED, className=Component.UNDEFINED, contentEditable=Component.UNDEFINED, contextMenu=Component.UNDEFINED, dir=Component.UNDEFINED, draggable=Component.UNDEFINED, hidden=Component.UNDEFINED, lang=Component.UNDEFINED, spellCheck=Component.UNDEFINED, style=Component.UNDEFINED, tabIndex=Component.UNDEFINED, title=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state']
self._type = 'Pre'
self._namespace = 'dash_html_components'
self._valid_wildcard_attributes = ['data-', 'aria-']
self.available_properties = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Pre, self).__init__(children=children, **args)
| [
"[email protected]"
] | |
95e81629ec5b165d02943f34a71fc1f1080bcef5 | cfb4e8721137a096a23d151f2ff27240b218c34c | /mypower/matpower_ported/mp-opt-model/lib/@opt_model/solve.py | 4ebc7aba4174ad01cb668380102ca885eae9fb2c | [
"Apache-2.0"
] | permissive | suryo12/mypower | eaebe1d13f94c0b947a3c022a98bab936a23f5d3 | ee79dfffc057118d25f30ef85a45370dfdbab7d5 | refs/heads/master | 2022-11-25T16:30:02.643830 | 2020-08-02T13:16:20 | 2020-08-02T13:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | def solve(*args,nout=5,oc=None):
if oc == None:
from .....oc_matpower import oc_matpower
oc = oc_matpower()
return oc.solve(*args,nout=nout)
| [
"[email protected]"
] | |
3c6dc99ca36a539efb2e696f6b57cbd205a83f8b | ae7ba9c83692cfcb39e95483d84610715930fe9e | /baidu/Paddle/paddle/trainer/tests/config_parser_test.py | 5ca874cec7914a20f79c2c7b1873c5bd04f60dca | [
"Apache-2.0"
] | permissive | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.config_parser import parse_config_and_serialize
if __name__ == '__main__':
parse_config_and_serialize('trainer/tests/test_config.conf', '')
parse_config_and_serialize(
'trainer/tests/sample_trainer_config.conf',
'extension_module_name=paddle.trainer.config_parser_extension')
parse_config_and_serialize('gserver/tests/pyDataProvider/trainer.conf', '')
| [
"[email protected]"
] | |
ae27520913674390e809620c54463d13c4e88d63 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /GIT-USERS/TOM-Lambda/CS35_IntroPython_GP/day3/intro/11_args.py | 2ec2eca832f454921138650bfb137e422a0c4711 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,852 | py | # Experiment with positional arguments, arbitrary arguments, and keyword
# arguments.
# Write a function f1 that takes two integer positional arguments and returns
# the sum. This is what you'd consider to be a regular, normal function.
<<<<<<< HEAD
def f1(a, b):
return a + b
=======
def f1(a, b):
return a + b
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
print(f1(1, 2))
# Write a function f2 that takes any number of integer arguments and prints the
# sum. Google for "python arbitrary arguments" and look for "*args"
<<<<<<< HEAD
def f2(*args):
sum = 0
for i in args:
sum += i
return sum
print(f2(1)) # Should print 1
print(f2(1, 3)) # Should print 4
print(f2(1, 4, -12)) # Should print -7
=======
def f2(*args):
sum = 0
for i in args:
sum += i
return sum
print(f2(1)) # Should print 1
print(f2(1, 3)) # Should print 4
print(f2(1, 4, -12)) # Should print -7
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
print(f2(7, 9, 1, 3, 4, 9, 0)) # Should print 33
a = [7, 6, 5, 4]
# What thing do you have to add to make this work?
<<<<<<< HEAD
print(f2(*a)) # Should print 22
=======
print(f2(*a)) # Should print 22
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# Write a function f3 that accepts either one or two arguments. If one argument,
# it returns that value plus 1. If two arguments, it returns the sum of the
# arguments. Google "python default arguments" for a hint.
<<<<<<< HEAD
def f3(a, b=1):
return a + b
print(f3(1, 2)) # Should print 3
print(f3(8)) # Should print 9
=======
def f3(a, b=1):
return a + b
print(f3(1, 2)) # Should print 3
print(f3(8)) # Should print 9
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# Write a function f4 that accepts an arbitrary number of keyword arguments and
# prints out the keys and values like so:
#
# key: foo, value: bar
# key: baz, value: 12
#
# Google "python keyword arguments".
<<<<<<< HEAD
def f4(**kwargs):
for k, v in kwargs.items():
print(f'key: {k}, value: {v}')
# Alternate:
# for k in kwargs:
# print(f'key: {k}, value: {kwargs[k]}')
=======
def f4(**kwargs):
for k, v in kwargs.items():
print(f"key: {k}, value: {v}")
# Alternate:
# for k in kwargs:
# print(f'key: {k}, value: {kwargs[k]}')
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# Should print
# key: a, value: 12
# key: b, value: 30
f4(a=12, b=30)
# Should print
# key: city, value: Berkeley
# key: population, value: 121240
# key: founded, value: "March 23, 1868"
f4(city="Berkeley", population=121240, founded="March 23, 1868")
<<<<<<< HEAD
d = {
"monster": "goblin",
"hp": 3
}
=======
d = {"monster": "goblin", "hp": 3}
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
# What thing do you have to add to make this work?
f4(**d)
| [
"[email protected]"
] | |
7accaa8ad9e3c45b158dd9537e55e683338dea29 | 70e1159856750f04e58c0ffc3f54d094a4602c07 | /booktest/views.py | 84958fd19d5631e83ebfd2b20bac16190adc186f | [] | no_license | wxp19940506/django_test | 032e78a4eb45eb0c54dbafd43dfd0e463d455bb5 | c586cb62d1bb1a21f3430155b3d82ab7b2a65da6 | refs/heads/master | 2021-05-10T11:52:54.186422 | 2018-01-22T07:55:11 | 2018-01-22T07:55:11 | 118,424,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | from django.shortcuts import render
from django.http import *
from django.template import RequestContext,loader
from .models import *
# Create your views here.
def index(request):
# temp = loader.get_template("booktest/index.html")
#
# return HttpResponse(temp.render())
booklist = BookInfo.objects.all()
context = {'lists':booklist}
return render(request,'booktest/index.html',context)
def show(request,id):
book = BookInfo.objects.get(pk=id)
herolist = book.heroinfo_set.all()
context = {'list':herolist}
return render(request,'booktest/show.html',context)
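# Hypothetical URLconf wiring for these views (regexes and module paths are
# assumptions, not part of this file), in the Django 1.x style used here:
#
# from django.conf.urls import url
# from booktest import views
#
# urlpatterns = [
#     url(r'^$', views.index),
#     url(r'^(\d+)/$', views.show),
# ]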
| [
"[email protected]"
] | |
e7fc2c8eede38ab1d057f2930410a29a6191871a | f14946892dcc62732cffd0dba364d2098e6de607 | /converter.py | 40fb58d5f55a79226720f093675f16897083b36c | [] | no_license | DollaR84/notes | 556368c12b0ead9901b05b95a5691138b588eb86 | a74ec7cf41b842501d1c24ec3b180d76be1fbef1 | refs/heads/master | 2023-03-26T21:04:37.415037 | 2020-06-18T14:11:15 | 2020-06-18T14:11:15 | 223,773,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,300 | py | """
Converter database from old versions to new.
Created on 19.04.2020
@author: Ruslan Dolovanyuk
"""
from copy import deepcopy
from datetime import datetime
import os
from database import Database
import tables
import updates
class DBConverter:
"""Converter database on new update versions."""
def __init__(self, db_name):
"""initialization converter database."""
self.__db_name = db_name
self.__db = Database()
self.__update_functions = [
'update_db2',
'update_db3',
'update_db4',
'update_db5',
]
def __get_old_data(self, tables_list):
"""Get all data from old database."""
self.__old_data = {table: self.__db.get("SELECT * FROM %s" % table) for table in tables_list}
def checker(self, db, tables_dict):
"""Check and return version input database."""
tables_db = db.get_tables_names()
tables_cr = tables.get_tables_names(tables_dict)
diff_tables = list(set(tables_cr) - set(tables_db))
if not diff_tables:
for table in tables_cr:
columns_db = db.get_columns_names(table)
diff_columns = list(set(tables.get_columns_names(tables_dict[table])) - set(columns_db))
if 'order_sort' in diff_columns:
return 1
elif 'readonly' in diff_columns:
return 2
elif ('date_create' in diff_columns) and ('date_update' in diff_columns):
return 3
elif ('state_check' in diff_columns) and ('state' in diff_columns):
return 4
else:
pass
elif 'states' in diff_tables:
return 4
else:
pass
return tables.VERSION
def __save_old_db(self, db_name, version):
"""Saving old databases before updates."""
date = datetime.strftime(datetime.now(), "%d.%m.%Y")
time = datetime.strftime(datetime.now(), "%H.%M.%S")
try:
os.rename(''.join([db_name, '.db']), ''.join([db_name, '.v{}.'.format(version), date, '.db']))
except:
os.rename(''.join([db_name, '.db']), ''.join([db_name, '.v{}.'.format(version), date, '.', time, '.db']))
def update_db(self, db_ver, tables_dict_default, update_func):
"""Run update database tables."""
self.__db.connect(self.__db_name + '.db')
self.__get_old_data(self.__db.get_tables_names())
self.__db.disconnect()
self.__save_old_db(self.__db_name, db_ver)
self.__db.connect(self.__db_name + '.db')
tables_dict = deepcopy(tables_dict_default)
for table in tables_dict.keys():
tables_dict[table].extend(updates.columns_all(table, db_ver+1))
script = 'CREATE TABLE {} ({}) WITHOUT ROWID'.format(table,
', '.join([' '.join(row) for row in tables_dict[table]]))
self.__db.put(script)
columns = tables.get_columns_names(tables_dict[table])
rows = self.__old_data.get(table, [])
update_func(table, columns, rows)
self.__db.commit()
self.__db.disconnect()
def update_db2(self, table, columns, rows):
"""Update database tables from version database 1 to version 2."""
counter = {}
for row in rows:
if table == 'notes':
parent = row[-1]
if parent not in counter:
counter[parent] = 0
counter[parent] += 1
script = 'INSERT INTO {} ({}) VALUES ({}, {})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]),
counter[parent])
else:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
self.__db.put(script, *row)
def update_db3(self, table, columns, rows):
"""Update database tables from version database 2 to version 3."""
for row in rows:
if table == 'notes':
script = 'INSERT INTO {} ({}) VALUES ({}, 0)'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
else:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
self.__db.put(script, *row)
def update_db4(self, table, columns, rows):
"""Update database tables from version database 3 to version 4."""
for row in rows:
if table == 'notes':
script = 'INSERT INTO {} ({}) VALUES ({}, "", "")'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
else:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
self.__db.put(script, *row)
def update_db5(self, table, columns, rows):
"""Update database tables from version database 4 to version 5."""
for row in rows:
if table == 'notes':
script = 'INSERT INTO {} ({}) VALUES ({}, 0, "")'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
else:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table,
', '.join(columns),
', '.join(['?' for _ in range(len(row))]))
self.__db.put(script, *row)
def check_rows(self, db, tables_dict):
"""Add rows in updates databases."""
for table in list(tables_dict.keys()):
update_dict = updates.ROWS.get(table, {})
for version, rows in update_dict.items():
if version <= tables.VERSION:
if db.get_last_id(table) < int(rows[-1].split(', ')[0]):
columns = tables.get_columns_names(tables_dict[table])
for row in rows:
script = 'INSERT INTO {} ({}) VALUES ({})'.format(table, ', '.join(columns), row)
db.put(script)
db.commit()
def run(self, tables_dict_default, tables_dict):
"""Run convert data from old database to new."""
try:
self.__db.connect(self.__db_name + '.db')
db_ver = self.checker(self.__db, tables_dict)
self.__db.disconnect()
for index in range(db_ver-1, tables.VERSION-1):
self.update_db(index+1, tables_dict_default, getattr(self, self.__update_functions[index]))
except Exception as e:
print(e)
return False
return True
def main():
"""Main running this script."""
dbconv = DBConverter('notes')
    # NOTE: run() expects (tables_dict_default, tables_dict); a real caller
    # must supply the table definitions from the project's tables module
    dbconv.run()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
8a344aae06dbeb32785b94bf82c33c8f84c20b41 | 55d13d3e41d8651facf7c26d60de5e8b8ace4be5 | /piedpiper/crab/multicrab-0.py | ac77e6ce915307348ba0838f04a1b7373744c932 | [] | no_license | phylsix/Firefighter | e8ab5fdbde2dab341a67740aa62c5710683e9bab | 8f1d8d6e59b443a8216c70ebdd334b48945aeed0 | refs/heads/master | 2020-12-19T18:31:14.312639 | 2020-08-04T00:35:45 | 2020-08-04T00:35:45 | 235,812,142 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,190 | py | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import time
from os.path import basename, join
import yaml
from CRABAPI.RawCommand import crabCommand
from crabConfig_0 import *
from Firefighter.piedpiper.utils import *
verbose = False
alwaysDoCmd = True
if os.environ["CMSSW_BASE"] not in os.path.abspath(__file__):
print("$CMSSW_BASE: ", os.environ["CMSSW_BASE"])
print("__file__: ", os.path.abspath(__file__))
    sys.exit("Inconsistent release environment!")
BASEDIR = join(os.environ["CMSSW_BASE"], "src/Firefighter/piedpiper")
CONFIG_NAME = sys.argv[1]
assert os.path.isfile(CONFIG_NAME)
def main():
multiconf = yaml.load(open(CONFIG_NAME).read())
gridpacks = multiconf["gridpacks"]
njobs = multiconf["njobs"]
year = multiconf["year"]
lxy = multiconf["lxy"]
ctaus = multiconf.get("ctaus", None)
assert len(gridpacks) == len(ctaus)
ctaumap = dict(zip(gridpacks, ctaus))
config.Data.totalUnits = config.Data.unitsPerJob * njobs
config.Data.outLFNDirBase += "/{0}".format(year)
# loop through
donelist = list()
for gridpack in gridpacks:
print("gridpack:", gridpack)
#'SIDM_XXTo2ATo4Mu_mXX-1000_mA-0p25_slc6_amd64_gcc481_CMSSW_7_1_30_tarball.tar.xz'
gridpack_name = basename(gridpack)
## outputPrimaryDataset: SIDM_XXTo2ATo4Mu or SIDM_XXTo2ATo2Mu2e
config.Data.outputPrimaryDataset = gridpack_name.split("_mXX")[0]
## outputDatasetTag: mXX-1000_mA-0p25_lxy-0p3_ctau-0p001875_GENSIM_2018
mxxma = gridpack_name.split("_", 2)[-1].split("_slc")[0]
lxystr = str(lxy).replace(".", "p")
ctaustr = str(ctaumap[gridpack]).replace(".", "p")
config.Data.outputDatasetTag = "{}_lxy-{}_ctau-{}_GENSIM_{}".format(
mxxma, lxystr, ctaustr, year
)
## requestName
config.General.requestName = "_".join(
[
config.Data.outputPrimaryDataset,
config.Data.outputDatasetTag,
time.strftime("%y%m%d-%H%M%S"),
]
)
if gridpack.startswith("root://"):
cpcmd = "xrdcp -f {0} {1}".format(gridpack, join(BASEDIR, "cfg/gridpack.tar.xz"))
elif gridpack.startswith("http"):
cpcmd = "wget -q {} -O {}".format(gridpack, join(BASEDIR, "cfg/gridpack.tar.xz"))
else:
cpcmd = "cp {0} {1}".format(gridpack, join(BASEDIR, "cfg/gridpack.tar.xz"))
if verbose:
print("$", cpcmd)
print(
"$ cat", join(BASEDIR, "python/externalLHEProducer_and_PYTHIA8_Hadronizer_cff.py")
)
print(get_gentemplate(year).format(CTAU=ctaumap[gridpack]))
print("------------------------------------------------------------")
print(config)
print("------------------------------------------------------------")
doCmd = True if alwaysDoCmd else raw_input("OK to go? [y/n]").lower() in ["y", "yes"]
if doCmd:
# 1. copy gridpack
os.system(cpcmd)
# 2. write genfrag_cfi
with open(
join(BASEDIR, "python/externalLHEProducer_and_PYTHIA8_Hadronizer_cff.py"), "w"
) as genfrag_cfi:
genfrag_cfi.write(get_gentemplate(year).format(CTAU=ctaumap[gridpack]))
# 3. write gen_cfg
cfgcmd = get_command("GEN-SIM", year, rand=False)
os.system(cfgcmd)
# 4. crab submit
crabCommand("submit", config=config)
donelist.append(gridpack)
print("submitted: ", len(donelist))
for x in donelist:
print(x)
print("------------------------------------------------------------")
undonelist = [x for x in gridpacks if x not in donelist]
print("unsubmitted: ", len(undonelist))
for x in undonelist:
print(x)
if undonelist:
with open("unsubmitted-0.yml.log", "w") as outf:
yaml.dump(
{"gridpacks": undonelist, "njobs": njobs, "year": year},
outf,
default_flow_style=False,
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
03622786a4de2d5c12beee1a16d5fba75dcf2347 | 29ad9caf139fab91580d7be36c9bd07623c4ca4d | /py/edu_freq_min.py | fc3b361beeafe60bea31d57a072936492e1f99f0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-unknown"
] | permissive | bcgov/flatfile-tools | f324687389a508aad641131f70bb66c533917bbe | 749071129cab7a598bd4c2edf050dce59324a97f | refs/heads/master | 2021-06-10T15:14:08.266856 | 2021-05-13T14:23:48 | 2021-05-13T14:23:48 | 183,680,156 | 2 | 0 | Apache-2.0 | 2020-04-15T02:21:47 | 2019-04-26T18:58:46 | Python | UTF-8 | Python | false | false | 3,247 | py | # 20190315 take pharmanet dispensations, look for earliest dispense date of drug type, as well as dispense frequency
# output has same data, with freq and min_srv_date added
import os
import sys
import time
from misc import *
def expected(f_name, lookup):
if f_name not in lookup:
err("expected field: " + str(f_name))
def freq_min(fn):
f = open(fn)
if f == None:
err("failed to open file: " + str(fn))
fields = f.readline().strip().split(",")
print fields
lookup = {}
for i in range(0, len(fields)):
lookup[fields[i].lower()] = i
print " ", lookup
    # validate required fields up front (note: "hp.gen_drug" and "srv_date"
    # are also used below but are not checked here)
    for i in ["studyid", "hp.din_pin"]:
        expected(i, lookup)
#mindate, freq = f(studyid, hp.din_pin)
dat = {}
ci = 0
f_size = os.stat(fn).st_size
tt = ttt = t_0 = time.time()
    while True:
        # readline() returns '' only at EOF; the original try/except never
        # fired there, so this loop spun forever at end of file
        line = f.readline()
        if not line:
            break
        words = line.strip().split(",")
        if words == ['']:
            continue
for i in range(0, len(words)):
words[i] = words[i].strip().lower()
if len(words) != len(fields):
print words
err("wrong number of fields, check csv file")
key = words[lookup["studyid"]] + "," + words[lookup["hp.gen_drug"]]
if key not in dat:
# freq = 1, min(serv_date) = serve_date
dat[key] = [1, words[lookup["srv_date"]]]
else:
freq, min_serv_date = dat[key]
freq += 1
date = words[lookup["srv_date"]]
min_serv_date = min_serv_date if min_serv_date < date else date
dat[key] = [freq, min_serv_date]
ci += 1
if ci % 100000 == 0:
ttt = tt
tt = time.time()
print "file", " %: ", 100. * (float(f.tell()) / float(f_size)), " MB/s:", (float(f.tell()) / 1000000.) / (tt- t_0)#
f.close()
f = open(fn)
if f is None:
err("failed to open file: " + str(fn))
print " +r " + fn
g_n = fn + "_freq-min.csv"
print " +w " + g_n
g = open(g_n, "wb")
print " +w " + g_n
if g is None:
err("failed to open file: " + str(g_n))
fields.append("freq")
fields.append("min_srv_date")
g.write(",".join(fields))
f.readline() # fields
ci = 0
    while True:
        # same EOF fix as the first pass: stop on the empty string readline()
        # returns at end of file instead of looping forever
        line = f.readline()
        if not line:
            break
        line = line.strip()
        if line == "":
            continue
        words = line.split(",")
for i in range(0, len(words)):
words[i] = words[i].strip().lower()
key = words[lookup["studyid"]] + "," + words[lookup["hp.gen_drug"]]
if key not in dat:
err("key should have been found")
freq, min_serv_date = dat[key]
g.write("\n" + line + "," + str(freq) + "," + str(min_serv_date))
ci += 1
if ci % 100000 == 0:
ttt = tt
tt = time.time()
print "file", " %: ", 100. * (float(f.tell()) / float(f_size)), " MB/s:", (float(f.tell()) / 1000000.) / (tt- t_0)#
f.close()
g.close()
freq_min("dsp_rpt.dat_slice.csv_select-STUDY.csv_lookup.csv")
freq_min("dsp_rpt.dat_slice.csv_select-CONTROL.csv_lookup.csv") | [
"[email protected]"
] | |
fedf9a4adf68e18a4d492e204426b009e4c03540 | 299c07abf832ba8b0a4181c526f95d6f861c3623 | /pages/views.py | 483e3caba3bab343c52bb5dfe15734215146eb40 | [] | no_license | ananyajana/hello-world | 37640880b8df2b170a4d64a7893eced35cf07293 | c498ec70016e22978f2c3f0365d6a38522254b72 | refs/heads/master | 2023-08-14T22:52:25.634216 | 2020-05-25T11:38:28 | 2020-05-25T11:38:28 | 266,763,806 | 0 | 0 | null | 2021-09-22T19:09:50 | 2020-05-25T11:39:56 | Python | UTF-8 | Python | false | false | 166 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def homePageView(request):
return HttpResponse('Hello, World!')
| [
"[email protected]"
] | |
e1ca10f66fe6e4a01b92ace526335679d0427751 | 42f4238073a70d1494537f8c8b07835b531e73a9 | /benchmarks/beach/redist_beach_erosion_board_waves_3d_c0p1_n.py | e0d724264d63efa1c4516fe87fb96968f2ac296f | [] | no_license | erdc/proteus-mprans | bd99257af7b3bbe08386533faf072dba22e93a61 | f8f4d20bc870b361c64c8ca2ceb99f045b373323 | refs/heads/master | 2022-09-11T13:18:39.973962 | 2022-08-11T16:27:29 | 2022-08-11T16:27:29 | 2,303,947 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | from proteus import *
from proteus.default_n import *
from redist_beach_erosion_board_waves_3d_p import *
from beach_erosion_board_waves_3d import *
if rdtimeIntegration == 'newton':
timeIntegration = NoIntegration
stepController = Newton_controller
elif rdtimeIntegration == 'tte':
timeIntegration = BackwardEuler_cfl
timeIntegration = PsiTCtte
elif rdtimeIntegration == 'osher-fmm':
timeIntegration = BackwardEuler_cfl
stepController = Osher_FMM_controller
runCFL=1.0
else:
timeIntegration = BackwardEuler_cfl
stepController = Osher_PsiTC_controller
#stepController = Osher_controller
runCFL=1.0
# timeIntegration = PsiTCtte
# stepController = PsiTCtte_controller
# rtol_res[0] = 0.0
# atol_res[0] = 0.1*L[0]/(nn-1.0)#10% of he
#runCFL=1.0
#DT=None
if spaceOrder == 1:
femSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}
if spaceOrder == 2:
femSpaces = {0:C0_AffineQuadraticOnSimplexWithNodalBasis}
elementQuadrature = SimplexGaussQuadrature(nd,sloshbox_quad_order)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,sloshbox_quad_order)
subgridErrorType = HamiltonJacobi_ASGS
if LevelModelType == RDLS.LevelModel:#RDLSV2.OneLevelRDLSV2 and not RDLSV2.debugRDLS:
subgridErrorType = HamiltonJacobi_ASGS_opt
if rdtimeIntegration == 'newton':
subgridError = subgridErrorType(coefficients,nd,stabFlag='2',lag=False)
else:
subgridError = subgridErrorType(coefficients,nd,stabFlag='2',lag=True)
#subgridError = HamiltonJacobi_ASGS(coefficients,nd,lag=True)
shockCapturing = None
#shockCapturing = ResGrad_SC(coefficients,nd,shockCapturingFactor=0.9,lag=False)
if rdtimeIntegration == 'newton':
shockCapturing = ResGradQuad_SC(coefficients,nd,shockCapturingFactor=rd_shockCapturingFactor,lag=False)
else:
shockCapturing = ResGradQuad_SC(coefficients,nd,shockCapturingFactor=rd_shockCapturingFactor,lag=True)
massLumping = False
#multilevelNonlinearSolver = MultilevelEikonalSolver
#levelNonlinearSolver = UnstructuredFMMandFSWsolvers.FMMEikonalSolver
multilevelNonlinearSolver = NLNI
levelNonlinearSolver = Newton
if rdtimeIntegration != 'newton':
maxLineSearches = 0
nonlinearSmoother = NLGaussSeidel
fullNewtonFlag = True
#this needs to be set appropriately for pseudo-transient
tolFac = 0.0
nl_atol_res = 0.01*L[0]/nn
atol_res[0] = 1.0e-6 #for pseudo transient
rtol_res[0] = 0.0
numericalFluxType = DoNothing
maxNonlinearIts = 50 #1 for PTC
matrix = SparseMatrix
if usePETSc:
numericalFluxType = DoNothing
multilevelLinearSolver = PETSc
levelLinearSolver = PETSc
else:
multilevelLinearSolver = LU
levelLinearSolver = LU
linearSmoother = GaussSeidel
linTolFac = 0.001
conservativeFlux = None
| [
"[email protected]"
] | |
9dca95f0eadc9b7576cb73579313ffa2ab36aaa3 | 444670e6d73ae9d95c0bb0459c8e02423876d2fb | /pycharm/LoginSite/mylogin/migrations/0001_initial.py | 08c4cb3c5cfd13d3c86c5e92dc2a59b4d175f342 | [] | no_license | zhangxingxing12138/web-pycharm | c8b6822be95bfb904f81f772185fe9e17fc77fc3 | 5f212e6805b0734aa3c791830526a95b24a930f4 | refs/heads/master | 2020-04-04T18:03:45.458309 | 2018-11-08T12:03:51 | 2018-11-08T12:03:51 | 156,148,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-06 00:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('password', models.CharField(max_length=256)),
('email', models.EmailField(max_length=254, unique=True)),
('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='男', max_length=32)),
('c_time', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-c_time'],
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
),
]
| [
"[email protected]"
] | |
450e45abb2e6f78121b9289dfc49ce668ece839a | 5fa293d0ef6f3bdc4791d869cf503b107cc3a5fb | /soap_client/urls.py | 5e70df6a51ac6d70d04e1a6e01da2fd63ec1b6aa | [
"MIT"
] | permissive | alldevic/nav_info | 0779ab116dd7718ac1d63fecfbc2d47dd8863c22 | 32681d1cd3ad43472c8f7fb49922094c4045111c | refs/heads/master | 2023-08-27T00:23:57.233994 | 2021-11-05T15:24:48 | 2021-11-05T15:24:48 | 278,404,502 | 0 | 0 | MIT | 2021-11-05T15:24:49 | 2020-07-09T15:37:41 | Python | UTF-8 | Python | false | false | 317 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from soap_client import views
router = DefaultRouter()
router.register('raw', views.RawViewSet, basename='raw')
router.register('data', views.DataViewSet, basename='data')
urlpatterns = [
path('', include(router.urls)),
]
| [
"[email protected]"
] | |
1a71d1d48c8c1e7899c78ae5ffffd819170fff8b | 0c5fed6415f7a307d0885d7579969d4e8f0121c8 | /Assignements/MT17143_Assignment5&6/MT17143_Problem1.py | fb5a51bc4753c66cf95906fd1944be7a9261bf8c | [] | no_license | akshitasawhney3008/Machine-Learning | 4032dfb3efaa7fdeb794913bb30e39f7a2fece31 | bef1672ecb51e07b291349af9df219d2bfea8f2d | refs/heads/master | 2023-02-02T16:22:01.016505 | 2020-12-10T10:37:49 | 2020-12-10T10:37:49 | 320,232,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | #MT17143 Akshita Sawhney
#Problem 1 RNA Sequencing
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import train_test_split  # renamed to sklearn.model_selection in scikit-learn >= 0.20
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import SpectralClustering
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import math
montpick= open("Read_count_rattus.txt",'r')
matrix = []
read_file = montpick.readlines()
for line in read_file: #file is extracted in a 2D matrix
row = []
list_of_words = line.split()
for i in range(1, len(list_of_words)):
row.append(int(list_of_words[i]))
matrix.append(row)
#Normalization
trc = 0 # total read count is calculated
for l in matrix:
for el in l:
trc+=el
sum=0
count=0
# print(len(matrix[1]))
for i in range(len(matrix[0])): # Sum of each column is calculated
column_sum = 0
for l in matrix:
column_sum += l[i]
sum+=column_sum
sum=sum/len(matrix[0])
for l in matrix: #Each readcount value is divided by the total read count
for i in range(len(l)):
div = float(l[i]/trc)
l[i]=div
for l in matrix: #Each readcount value is then multiplied by the sum of columns
for i in range(len(l)):
l[i]= float(l[i] * sum)
#Log Transform
for l in matrix:
for i in range(len(l)):
l[i]=math.log(1+l[i],2)
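# Net effect of the three passes above on each read count x:
#   x -> log2(1 + (x / total_read_count) * mean_column_sum)
# i.e. a library-size style rescaling followed by a log transform.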
# print(matrix)
# print("hi")
input_matrix = np.array(matrix)
# print(M)
#The actual data matrix is extracted from the phenodata which acts as the true data.
phenotype = []
phenodata = open("Phenotype.txt",'r')
lines= phenodata.readlines()
for l in lines:
phen = l.split()
phenotype.append(int(phen[0]))
# phenotype1 = phenotype[1:]
true_matrix= np.array(phenotype)
#Input Data is split into Train and Test set with test size to be 33%
X_train, X_test, y_train, y_test = train_test_split(np.transpose(input_matrix),true_matrix,test_size=0.33)
#Kmeans Clustering is performed
kmeans=KMeans(n_clusters=2, random_state= 0).fit(X_train)
kmean_prediction = kmeans.predict(X_test) #Test data is passed to check the results.
print(accuracy_score(y_test,kmean_prediction)*100) # accuracy of the predictions against the true labels
X_train, X_test, y_train, y_test = train_test_split(np.transpose(input_matrix),true_matrix,test_size=0.33)
#MiniBatchKmeans clustering is performed
Minibatchkmeans = MiniBatchKMeans(n_clusters=2, random_state= 0).fit(X_train)
minibatchkmean_prediction = Minibatchkmeans.predict(X_test)
print(accuracy_score(y_test,minibatchkmean_prediction)*100)
#Principle Component Analysis is performed to reduce the input data to 2Dimensional data.
pca = PCA(n_components=2).fit_transform(np.transpose(input_matrix))
# pca_fit = pca.fit(np.transpose(input_matrix))
y_trans = np.transpose(true_matrix)
plt.scatter(pca[:, 0], pca[:, 1], y_trans.shape[0], c = y_trans) #Scatter is used to visualize the graph
plt.show() | [
"[email protected]"
] | |
b3953b62fa3db6523934640dd6efa14908a3bbea | c5744c2fda48ae6a79c155c641fe98021a0cb7f3 | /PP4E/System/setenv.py | a03d7c04233f64c0efbc11ad24b5bc1eaace0f37 | [] | no_license | skinkie/Scripts | e0fd3d3f767612ade111f28bc7af3e1b25fc2947 | 80a1ba71ddf9a0c5ff33866832cb5c42aca0c0b1 | refs/heads/master | 2021-05-31T16:57:21.100919 | 2016-05-23T09:58:59 | 2016-05-23T09:58:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | #!/usr/bin/env python
# eg. 3-3
import os
print('setenv...', end=' ')
print(os.environ['USER'])
os.environ['USER'] = 'Brian'
os.system('python echoenv.py')
os.environ['USER'] = 'Arthur'
os.system('python echoenv.py')
os.environ['USER'] = input('?')
print(os.popen('python3.5 echoenv.py').read()) | [
"[email protected]"
] | |
fb602658c47b01a30ff2c7ae2b51eec8b1d10712 | faf2b052e7f8cd79467ad34fbc173f3bf4b1a21d | /test/test_ws_equipment_list_result.py | 7230f7e81388c99a49ad58906a36598acb75af45 | [] | no_license | atbe/MSU-Scholar-Api-Client-Python | 5d39577ce07ab285f0df9ee58a1ed7ff8ab08d2a | 31b263e5ad848fc6593c4662fbf2828ab9e2594c | refs/heads/master | 2021-05-04T00:51:46.720474 | 2018-03-12T23:52:17 | 2018-03-12T23:52:17 | 120,354,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | # coding: utf-8
"""
PURE API 510
    This is the Pure Web Service. Listed below are all available endpoints, along with a short description.<br/>In order to use the Pure Web Service, you must enter an API key. These are generated in the Administrator tab of Pure, and issued with a given set of available endpoints.<br/>To enter your API key and begin your use, press the Authorize button at the top of the page. You are then presented with two options for entering the API key: the first option is to use the API key in query format, and the second option is to use the API key in a header.<br/> For further documentation, see <a href=\"documentation/Default.htm\">API Documentation</a>.<br/>A new version of the API is released with each major version of Pure, and remains available for one year. This version is no longer available in Pure 5.14<br/>The old web service is deprecated, but still available <a href=\"../../../doc/\">here</a>, and it will no longer be available in Pure 5.13
OpenAPI spec version: 510
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import msu_scholars_api
from msu_scholars_api.rest import ApiException
from msu_scholars_api.models.ws_equipment_list_result import WSEquipmentListResult
class TestWSEquipmentListResult(unittest.TestCase):
""" WSEquipmentListResult unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testWSEquipmentListResult(self):
"""
Test WSEquipmentListResult
"""
# FIXME: construct object with mandatory attributes with example values
#model = msu_scholars_api.models.ws_equipment_list_result.WSEquipmentListResult()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
607f0c745c7df74bf1cbfc3ebac73ac5b92debb3 | 8d03310627f1f625eddda8f4a3e680892872e0ec | /batemaneq/__init__.py | 09ee7bcfa329b2d98875fd9beb5ea50bbdbf1f40 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | Rolleroo/batemaneq | 4da15e4bff32484d27ea9dc2b3338edc4956b0df | bd8c24d1f77ccb166b3210d81d9468f7789813ad | refs/heads/master | 2021-02-05T12:43:40.639427 | 2020-02-23T20:47:48 | 2020-02-23T20:47:48 | 243,781,711 | 1 | 0 | BSD-2-Clause | 2020-02-28T14:31:36 | 2020-02-28T14:31:36 | null | UTF-8 | Python | false | false | 356 | py | # -*- coding: utf-8 -*-
"""
batemaneq provides a Python package for evaluating the Bateman equation
"""
from __future__ import absolute_import
from ._release import __version__
from .bateman import bateman_parent, bateman_full
from ._bateman_double import bateman_parent as bateman_parent_arr
from ._bateman_double import bateman_full as bateman_full_arr
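
# Illustrative usage sketch (argument layout assumed, not verified against the
# implementation): a list of decay constants followed by the evaluation time.
#
#     from batemaneq import bateman_parent
#     bateman_parent([0.1, 0.2, 0.3], 1.0)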
| [
"[email protected]"
] | |
d1be16162f4ac7c7277b10428e050b2deff850ea | ef18d99eff01a708dddfc2cbf77f68bb1d8aa889 | /python/415.add-strings.py | 0d3f611c808e58d327f44045f8c4f9a31bc6f054 | [
"MIT"
] | permissive | fengbaoheng/leetcode | 53d5b6f92f7958d551e6297f77c4edfc042a4d58 | e37535a06b0b4cb23de9a2cfa357eb689b1c06fb | refs/heads/master | 2023-07-23T11:33:42.938862 | 2023-07-08T08:43:46 | 2023-07-08T08:43:46 | 131,551,048 | 1 | 0 | MIT | 2022-11-16T02:53:49 | 2018-04-30T03:13:55 | Java | UTF-8 | Python | false | false | 1,582 | py | #
# @lc app=leetcode.cn id=415 lang=python3
#
# [415] Add Strings
#
class Solution:
    # Split the strings into lists and add them digit by digit;
    # converting them directly to integers could exceed the maximum integer range
def addStrings(self, num1: str, num2: str) -> str:
short_num, long_num = (num1, num2) if len(num1) < len(num2) else (num2, num1)
short_length = len(short_num)
long_length = len(long_num)
if short_length == 0:
return long_num
elif long_length == 0:
return short_num
        # Convert to list form and reverse
short_num = list(short_num)
short_num.reverse()
long_num = list(long_num)
long_num.reverse()
carry = 0
num = []
        # Add digit by digit
for i in range(short_length):
s = int(short_num[i]) + int(long_num[i]) + carry
if s >= 10:
carry = 1
s -= 10
else:
carry = 0
num.append(s)
        # Handle the remaining digits of the longer number
for i in range(short_length, long_length):
s = int(long_num[i]) + carry
if s >= 10:
carry = 1
num.append(s - 10)
else:
carry = 0
num.append(s)
num.extend(long_num[i + 1 :])
break
        # The highest digit may need a final carry of 1
if carry == 1:
num.append(1)
num.reverse()
return "".join(map(str, num))
if __name__ == "__main__":
print(Solution().addStrings("1234", "9999"))
| [
"[email protected]"
] | |
ff82dba0faaadec9068bbc9b3ccc625a721573a6 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/file/formats/gzip/GZipFileSystemFactory.pyi | 65e1831d93c9e7d26029af1620bb52a08cc18eb9 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | pyi | from typing import List, overload
import ghidra.formats.gfilesystem
import ghidra.formats.gfilesystem.factory
import ghidra.util.task
import java.io
import java.lang
class GZipFileSystemFactory(object, ghidra.formats.gfilesystem.factory.GFileSystemFactoryWithFile, ghidra.formats.gfilesystem.factory.GFileSystemProbeBytesOnly):
MAX_BYTESREQUIRED: int = 65536
PROBE_BYTES_REQUIRED: int = 2
def __init__(self): ...
def create(self, __a0: ghidra.formats.gfilesystem.FSRL, __a1: ghidra.formats.gfilesystem.FSRLRoot, __a2: java.io.File, __a3: ghidra.formats.gfilesystem.FileSystemService, __a4: ghidra.util.task.TaskMonitor) -> ghidra.formats.gfilesystem.GFileSystem: ...
def equals(self, __a0: object) -> bool: ...
def getBytesRequired(self) -> int: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def probeStartBytes(self, __a0: ghidra.formats.gfilesystem.FSRL, __a1: List[int]) -> bool: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def bytesRequired(self) -> int: ...
| [
"[email protected]"
] | |
1fa034f767ef9f88cf6992b4ac2982972c7b0b5f | ca61296e18ae834628b6a4199bbd5a9379bdeff3 | /worker/models.py | 0cd8b2b7d891bed8050b7ab22c805edc0417230a | [] | no_license | shashank-sharma/gdrs | 8979361a21a01097ca9f5a9e969c55c8548fedfa | b0cb17eade5049b5175dc78eb93b0385b72ac61a | refs/heads/master | 2020-03-29T22:41:18.934031 | 2019-01-22T15:31:31 | 2019-01-22T15:31:31 | 150,435,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | from django.db import models
from accounts.models import User
# Create your models here.
class Driver(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
driving_licence_number = models.CharField(max_length=20)
expiry_date = models.DateField()
working = models.BooleanField(default=False)
class Cab(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
driver = models.ForeignKey(Driver, on_delete=models.CASCADE)
license_plate = models.CharField(max_length=20)
car_model_id = models.CharField(max_length=20)
manufacturing_id = models.CharField(max_length=20)
active = models.BooleanField(default=True)
class CarModel(models.Model):
cab = models.ForeignKey(Cab, on_delete=models.CASCADE)
model_name = models.CharField(max_length=80)
model_description = models.CharField(max_length=100)
class Shift(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
driver = models.ForeignKey(Driver, on_delete=models.CASCADE)
shift_start = models.DateField()
shift_end = models.DateField()
login_time = models.DateField()
logout_time = models.DateField()
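
# Minimal usage sketch (illustrative only; assumes migrations are applied and a
# User instance exists):
#
#     driver = Driver.objects.create(user=some_user,
#                                    driving_licence_number="DL-123456",
#                                    expiry_date="2025-12-31")
#     cab = Cab.objects.create(user=some_user, driver=driver,
#                              license_plate="KA-01-1234",
#                              car_model_id="M1", manufacturing_id="F1")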
| [
"[email protected]"
] | |
7aa49d03b00df1969a441a334cfa985a4fe57e98 | e87d793b3a5facc6e54e0263fbd67703e1fbb382 | /duckietown-world-venv/lib/python3.6/site-packages/geometry/manifolds/tests/embedding_test.py | c6435354a8abc351527207b355f8785768e2ff0f | [] | no_license | llingg/behaviour-benchmarking | a860bbe709309e13f3e1133d916944882199a40f | 85bbf1a9c2c628ba74480fe7abac3804d6afdac4 | refs/heads/v1 | 2022-10-06T08:21:29.068329 | 2020-06-11T07:02:46 | 2020-06-11T07:02:46 | 259,622,704 | 0 | 0 | null | 2020-06-02T17:52:46 | 2020-04-28T11:52:08 | C++ | UTF-8 | Python | false | false | 1,989 | py | # coding=utf-8
from geometry.manifolds import (SO3, SO2, R1, R2, R3, SE2, SE3, S2, S1, T1, T2,
T3, so2, so3, se2, se3, Tran3, Tran2, Tran1, tran2, tran1, tran3)
from nose.plugins.attrib import attr
def check_embed_relation_cond(A, B):
check_embed_relation_cond.description = 'Checking %s < %s' % (A, B)
msg = None
if not A.embeddable_in(B):
msg = '%s is not embeddable in %s' % (A, B)
if not B.can_represent(A):
msg = '%s cannot represent %s' % (B, A)
if msg:
raise Exception('%s;\n %s: %s\n %s: %s' %
(msg, A, A.relations_descriptions(),
B, B.relations_descriptions()))
def check_embed_relation(A, B):
check_embed_relation_cond(A, B)
points = list(A.interesting_points())
if not points:
msg = ('Cannot test because manifold %s does '
'not have interesting points' % A)
raise Exception(msg)
for a1 in points:
A.belongs(a1)
b = A.embed_in(B, a1)
B.belongs(b)
a2 = A.project_from(B, b)
A.belongs(a2)
a3 = B.project_to(A, b)
A.belongs(a3)
A.assert_close(a1, a2)
A.assert_close(a1, a3)
@attr('embed')
def test_embed_relations():
couples = []
def add(A, B):
couples.append((A, B))
add(R1, R2)
add(R2, R3)
add(R1, R3)
add(SO2, SO3)
add(SO2, SE3)
add(SO2, SE2)
add(SO3, SE3)
add(so3, se3)
add(so2, se2)
add(so2, se3)
add(S1, S2)
add(R1, SE2)
add(R2, SE2)
add(R1, SE3)
add(R2, SE3)
add(R3, SE3)
add(Tran1, SE2)
add(Tran2, SE2)
add(Tran1, SE3)
add(Tran2, SE3)
add(Tran3, SE3)
add(T1, T2)
add(T2, T3)
add(T1, T3)
add(T1, R1)
add(T2, R2)
add(T3, R3)
add(T3, SE3)
add(S1, SE3)
add(S2, SE3)
add(tran1, se3)
add(tran2, se3)
add(tran3, se3)
add(T1, S1)
for A, B in couples:
check_embed_relation(A, B)
| [
"[email protected]"
] | |
d37a305a988762a88462d72268ef5b9960e21900 | f7e9bf6fa18a41b52994b1f16fd55c4d69f33b56 | /plugins/embed_picasaweb_image/embed_picasaweb_image.py | 02b13d5d2a5d91850ad78e166be1f0e6b52a1e0a | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | siongui/userpages | 1716f2d24e52b514ea8534027cec9707783d0d47 | 494b95e61715a49dce6615103a5b19fa05f276f1 | refs/heads/master | 2023-07-01T12:52:04.813216 | 2023-06-12T16:31:25 | 2023-06-12T16:31:25 | 13,944,800 | 80 | 39 | Unlicense | 2023-08-18T03:51:40 | 2013-10-29T02:39:13 | Go | UTF-8 | Python | false | false | 1,564 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Creating reStructuredText Directives
# @see http://docutils.sourceforge.net/docs/howto/rst-directives.html
from docutils.parsers.rst import directives, Directive
from docutils import nodes
class embed_picasaweb_image(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = { 'album_name' : directives.unchanged,
'css_class' : directives.unchanged,
'description' : directives.unchanged,
'album_url' : directives.uri,
'image_url' : directives.uri,
}
has_content = False
def run(self):
url = directives.uri(self.arguments[0])
album_name = self.options.get('album_name', None)
album_url = self.options.get('album_url', None)
image_url = self.options.get('image_url', None)
css_class = self.options.get('css_class', None)
description = self.options.get('description', u'')
if album_name and album_url:
            html = u'<div class="{}"><a href="{}"><img src="{}"></a><div>{}</div><div class="album">From Album: <a href="{}">{}</a></div></div>'.format(
css_class, image_url, url, description, album_url, album_name)
else:
            html = u'<div class="{}"><a href="{}"><img src="{}"></a><div>{}</div></div>'.format(
css_class, image_url, url, description)
return [nodes.raw('', html, format='html')]
def register():
directives.register_directive('embed_picasaweb_image', embed_picasaweb_image)
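
# Example reST usage of the directive (all values illustrative):
#
#   .. embed_picasaweb_image:: https://lh3.googleusercontent.com/example/photo.jpg
#      :album_name: My Album
#      :album_url: https://picasaweb.google.com/user/album
#      :image_url: https://picasaweb.google.com/user/album/photo
#      :css_class: picasa-image
#      :description: A short caption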
| [
"[email protected]"
] | |
018b478deaa34ef7036f428aa0a5ce8e3ee99722 | 7f3112bd1cb6d5831370f01db1bf4ef7b9d6aee6 | /selenium/test_search_in_python_org_search.py | 43a2016183284cf053b611255f753820858169f3 | [] | no_license | insta-code1/Python-Unittests | f8a2138ae457756d8897594eaa2745a40f908a7e | 84d62edce5b929b1822d4d7a92c7edf3003ddf07 | refs/heads/master | 2020-12-25T14:59:08.705048 | 2016-09-04T12:11:22 | 2016-09-04T12:11:22 | 67,342,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class PythonOrgSearch(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
def test_search_in_python_org(self):
driver = self.driver
driver.get("http://www.python.org")
self.assertIn("Python", driver.title)
elem = driver.find_element_by_name("q")
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
d0684e191884794bcca60c9a003d3a736017998e | f8ece22d9e9e12e2cbca56d72a6b2728ba9a275a | /polyaxon/experiments/utils.py | 50329e5e6fe312b3cb5120c878e85833117c63a9 | [
"MIT"
] | permissive | pparan/polyaxon | 8c8912f9ba724e007357efcaefeab86fec2d5630 | 423199721e90431209b00c0f76caa6b4f9aa4b24 | refs/heads/master | 2021-04-15T07:15:19.701268 | 2018-03-21T11:59:12 | 2018-03-21T11:59:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from experiments.models import Experiment
def is_experiment_still_running(experiment_id=None, experiment_uuid=None):
if not any([experiment_id, experiment_uuid]) or all([experiment_id, experiment_uuid]):
raise ValueError('`is_experiment_still_running` function expects an experiment id or uuid.')
try:
if experiment_uuid:
experiment = Experiment.objects.get(uuid=experiment_uuid)
else:
experiment = Experiment.objects.get(id=experiment_id)
except Experiment.DoesNotExist:
return False
    return experiment.is_running
| [
"[email protected]"
] | |
f4ee36d85f337be493ffa614eb246403c3fd37ca | 2bd4392a0929bf294df65bf45338d62e22474a25 | /expenses/utils.py | 07e29fb03ca4ec007e93ca6c77e29ab631a28c23 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | DjangoGworls/django-expenses | c5c7825017884be1bd53d5d19ee15acfb7bafbbd | 60f2c20c21a9f01d7efa169b501e3beb361795d1 | refs/heads/master | 2023-01-30T01:20:45.723489 | 2020-11-07T11:13:03 | 2020-11-07T11:13:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | # Django-Expenses
# Copyright © 2018-2020, Chris Warrick.
# All rights reserved.
# See /LICENSE for licensing information.
"""Assorted Expenses utilities."""
import babel.numbers
import datetime
import decimal
import iso8601
import itertools
import typing
from django.utils import timezone
from django.conf import settings
from django.utils.translation import get_language
def format_money(amount: typing.Union[int, float, decimal.Decimal]) -> str:
"""Format an amount of money for display."""
if amount is None:
amount = 0
return babel.numbers.format_currency(
amount, settings.EXPENSES_CURRENCY_CODE, locale=settings.EXPENSES_CURRENCY_LOCALE
)
def today_date() -> datetime.date:
"""Get today’s date."""
return timezone.now().date()
def revchron(qs):
"""Sort expenses in reverse-chronological order."""
return qs.order_by("-date", "-date_added")
def round_money(amount: decimal.Decimal) -> decimal.Decimal:
"""Round money in a way appropriate for money."""
return amount.quantize(decimal.Decimal(".01"), rounding=decimal.ROUND_HALF_UP)
def dict_overwrite(destdict: dict, destkey, srcdict: dict, srckey=None) -> None:
"""Override a dict key with one taken from another dict."""
destdict[destkey] = srcdict.get(srckey or destkey, destdict[destkey])
def serialize_date(date: datetime.date) -> str:
"""Serialize a datetime."""
return date.isoformat()
def serialize_dt(dt: datetime.datetime) -> str:
"""Serialize a datetime."""
return dt.isoformat()
def serialize_decimal(amount: decimal.Decimal) -> str:
"""Serialize a decimal value."""
return str(amount)
def parse_date(date_str: str) -> datetime.date:
"""Parse an ISO 8601 date."""
return iso8601.parse_date(date_str).date()
def parse_dt(dt_str: str) -> datetime.datetime:
"""Parse an ISO 8601 datetime."""
return iso8601.parse_date(dt_str)
def parse_decimal(amount_str: str) -> decimal.Decimal:
"""Parse a decimal object."""
return decimal.Decimal(amount_str)
def parse_amount_input(amount_str: str) -> typing.Optional[decimal.Decimal]:
"""Parse an amount in a human-forgiving way."""
try:
return decimal.Decimal(amount_str)
except decimal.InvalidOperation:
try:
return decimal.Decimal(amount_str.replace(",", "."))
        except (decimal.InvalidOperation, ValueError):  # Decimal raises InvalidOperation, not ValueError
return None
def get_babel_locale() -> str:
"""Get a babel-friendly locale name."""
lang, _, region = get_language().partition("-")
if not region:
region = lang.upper()
return f"{lang}_{region.upper()}"
T = typing.TypeVar("T")
def peek(iterable: typing.Iterable[T]) -> (T, typing.Iterable[T]):
"""Peek at the first row of an iterable.
Returns (first row, iterable with first row)."""
iterator = iter(iterable)
try:
first_row = next(iterator)
except StopIteration:
return None, None
return first_row, itertools.chain([first_row], iterator)
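
# Example (illustrative):
#
#     first, rows = peek(iter([1, 2, 3]))
#     # first == 1, and rows still yields 1, 2, 3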
| [
"[email protected]"
] | |
9fbba12d321ad7bcae325cc7b8e8bc3d77faa827 | b557781831f6345f36f5d35b9c5fa6cbdb4c4815 | /billing/yup/views.py | eb97a8947f82f58dad204478f718bf8e1651efe5 | [] | no_license | komsihon/Project1 | 5c067bcc2f299a28163eccf27716ed092e070b78 | e32c481ad358c2a8af52d95a9bbc2f9faebfd703 | refs/heads/master | 2021-06-03T23:52:21.555310 | 2021-01-13T10:53:24 | 2021-01-13T10:53:24 | 98,784,648 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,108 | py | import json
import traceback
import requests
from django.conf import settings
from django.db import transaction
from django.http import HttpResponse
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.template.defaultfilters import slugify
from django.utils.module_loading import import_by_path
from ikwen.core.utils import get_service_instance
from ikwen.billing.models import PaymentMean, MoMoTransaction
import logging
logger = logging.getLogger('ikwen')
YUP = 'yup'
UNKNOWN_PHONE = '<Unknown>'
CURRENCY = "XAF"
def init_yup_web_payment(request, *args, **kwargs):
api_url = getattr(settings, 'YUP_API_URL', 'https://33027.tagpay.fr/online/online.php')
yup = json.loads(PaymentMean.objects.get(slug=YUP).credentials)
phone = UNKNOWN_PHONE
service = get_service_instance()
request.session['phone'] = phone
amount = int(request.session['amount'])
model_name = request.session['model_name']
object_id = request.session['object_id']
if request.user.is_authenticated():
username = request.user.username
language = request.user.language
else:
username = None
language = 'en'
# Request a session id
try:
params = {'merchantid': yup['merchant_id']}
session_id_request = requests.get(api_url, params=params, verify=False)
    except requests.exceptions.HTTPError as errh:
        logger.error("YUP: HTTP error: %s", errh)
        return HttpResponseRedirect(request.session['cancel_url'])
    except requests.exceptions.ConnectionError as errc:
        logger.error("YUP: error connecting: %s", errc)
        return HttpResponseRedirect(request.session['cancel_url'])
    except requests.exceptions.Timeout as errt:
        logger.error("YUP: timeout error: %s", errt)
        return HttpResponseRedirect(request.session['cancel_url'])
    except requests.exceptions.RequestException as err:
        logger.error("YUP: unexpected request error: %s", err)
        return HttpResponse(request.session['cancel_url'])
session_id_resp_message = session_id_request.text
if session_id_resp_message[:2] == "NO":
logger.debug("YUP: Unable to provide a session with %s as Merchand ID" % (yup['merchant_id']))
logger.debug("YUP: SERVER ERR TEXT is : %s" % session_id_resp_message)
return HttpResponse("Error, YUP: Unable to provide a session with %s as Merchand ID; Please check and restart" % (yup['merchant_id']))
else:
logger.debug("YUP: Session ID OK; ")
session_id = session_id_resp_message.replace('OK:', '')
payments_conf = getattr(settings, 'PAYMENTS', None)
if payments_conf:
conf = request.session['payment_conf']
path = payments_conf[conf]['after']
else:
path = getattr(settings, 'MOMO_AFTER_CASH_OUT')
with transaction.atomic(using='wallets'):
try:
momo_tx = MoMoTransaction.objects.using('wallets').get(object_id=object_id)
except MoMoTransaction.DoesNotExist:
momo_tx = MoMoTransaction.objects.using('wallets').create(service_id=service.id, type=MoMoTransaction.CASH_OUT,
phone=phone, amount=amount, model=model_name,
object_id=object_id, wallet=YUP, username=username,
callback=path)
except MoMoTransaction.MultipleObjectsReturned:
momo_tx = MoMoTransaction.objects.using('wallets').filter(object_id=object_id)[0]
request.session['tx_id'] = momo_tx.id
accept_url = request.session['return_url']
# accept_url += '/%d' % momo_tx.id
company_name = slugify(service.config.company_name).replace('-', ' ')
logger.debug("YUP: Initiating paymentof %dF with %s as Merchand ID" % (amount, yup['merchant_id']))
context = {
'api_url': api_url,
'sessionid': session_id,
'merchantid': yup['merchant_id'],
'amount': amount,
'currency': CURRENCY,
'purchaseref': object_id,
'phone': phone,
'brand': company_name,
'description': '',
'declineurl': request.session['cancel_url'],
'cancelurl': request.session['cancel_url'],
'accepturl': accept_url,
'text': '',
'language': language
}
return render(request, 'billing/yup/do_redirect.html', context)
def yup_process_notification(request, *args, **kwargs):
logger.debug("YUP: New incoming notification %s" % request.META['REQUEST_URI'])
amount = request.GET['amount']
object_id = request.GET['purchaseref']
paymentref = request.GET['paymentref']
error_text = request.GET.get('error')
status = request.GET['status']
try:
tx = MoMoTransaction.objects.using('wallets').get(object_id=object_id)
except:
logger.error("YUP: Failure while querying transaction status", exc_info=True)
return HttpResponse("OK")
logger.debug("YUP: Successful payment of %dF from %s" % (tx.amount, tx.username))
if status == "OK":
path = tx.callback
momo_after_checkout = import_by_path(path)
with transaction.atomic(using='wallets'):
try:
with transaction.atomic():
MoMoTransaction.objects.using('wallets').filter(object_id=object_id) \
.update(processor_tx_id=paymentref, message='OK', is_running=False,
status=MoMoTransaction.SUCCESS)
except:
logger.error("YUP: Could not mark transaction as Successful. User: %s, Amt: %d" % (tx.username, tx.amount), exc_info=True)
else:
try:
momo_after_checkout(request, transaction=tx)
except:
MoMoTransaction.objects.using('wallets').filter(object_id=object_id) \
.update(message=traceback.format_exc())
logger.error("YUP: Error while running callback. User: %s, Amt: %d" % (tx.username, tx.amount), exc_info=True)
elif error_text != 'AUTHENTICATION':
with transaction.atomic(using='wallets'):
try:
if "CANCEL" in error_text:
logger.debug("YUP: transaction canceled. User: %s, Amt: %d " % (tx.username, tx.amount))
MoMoTransaction.objects.using('wallets').filter(object_id=object_id) \
.update(message=error_text, is_running=False, status=MoMoTransaction.DROPPED)
else:
logger.debug("YUP: transaction failed. User: %s, Amt: %d " % (tx.username, tx.amount))
MoMoTransaction.objects.using('wallets').filter(object_id=object_id) \
.update(message=error_text, is_running=False, status=MoMoTransaction.FAILURE)
except:
logger.error("YUP: Could not mark transaction as Failed or Canceled. User: %s, Amt: %s" % (tx.username, tx.amount), exc_info=True)
return HttpResponse('OK')
| [
"[email protected]"
] | |
9e17efeaae7712f632dfc951b8c4faccf09300ea | 3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9 | /EventFilter/Cosmics/python/__init__.py | 8de5e10f583a8354f7bdce130bf75b64b564ba0f | [] | no_license | sextonkennedy/cmssw-ib | c2e85b5ffa1269505597025e55db4ffee896a6c3 | e04f4c26752e0775bd3cffd3a936b288ee7b0268 | HEAD | 2016-09-01T20:09:33.163593 | 2013-04-26T12:05:17 | 2013-04-29T16:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | #Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/EventFilter/Cosmics/',1)[0])+'/cfipython/slc6_amd64_gcc480/EventFilter/Cosmics')
| [
"[email protected]"
] | |
6cc0b40552a7b84b67654c5343748b10becaba83 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /qbittorrent_examples/common.py | ddc95e8e8fe8667135cad88bfda306fb07fca849 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
from typing import List, Dict
from pathlib import Path
# pip install tabulate
from tabulate import tabulate
# pip install python-qbittorrent
from qbittorrent import Client
from config import IP_HOST, USER, PASSWORD
sys.path.append(str(Path(__file__).resolve().parent.parent))
from human_byte_size import sizeof_fmt
def print_table(rows: List[List[str]], headers: List[str], show_index=True):
if show_index:
show_index = range(1, len(rows) + 1)
text = tabulate(rows, headers=headers, tablefmt="grid", showindex=show_index)
print(text)
def print_files_table(files: List[Dict]):
rows = [(file['name'], sizeof_fmt(file['size'])) for file in sorted(files, key=lambda x: x['name'])]
headers = ['#', 'File Name', 'Size']
print_table(rows, headers)
def print_torrents(torrents: List[Dict]):
total_size = 0
for i, torrent in enumerate(torrents, 1):
torrent_size = torrent['total_size']
total_size += torrent_size
print(f"{i:3}. {torrent['name']} ({sizeof_fmt(torrent_size)})")
print()
print(f'Total torrents: {len(torrents)}, total size: {sizeof_fmt(total_size)} ({total_size} bytes)')
def get_client() -> Client:
client = Client(IP_HOST)
client.login(USER, PASSWORD)
return client
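
# Example (illustrative; `torrents()` is the python-qbittorrent call that lists
# all torrents):
#
#     client = get_client()
#     print_torrents(client.torrents())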
| [
"[email protected]"
] | |
4d066a1f3af37064dc6990b14a9a2e2baf54dc92 | 8f70b40ef1c657ee14accfe6e2f8b1ebb1bebb7e | /employeeform/migrations/0004_auto_20191206_1630.py | f84f3d3d85c39b082cf3985e9977f625ffe70444 | [] | no_license | TejashviVerma/School_ERP | e3d6f1aabe92167c2b55c0b1682dde505bb04edd | 11406da8b1d8701b7ea55f75c76f1cbf44a72c53 | refs/heads/master | 2023-08-03T15:10:11.481306 | 2020-09-13T18:02:40 | 2020-09-13T18:02:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # Generated by Django 2.2.5 on 2019-12-06 11:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('employeeform', '0003_auto_20191206_1619'),
]
operations = [
migrations.RenameField(
model_name='employeedocuments',
old_name='idProof',
new_name='IdProof',
),
]
| [
"[email protected]"
] | |
482a9c23b8b78c4c068f2a92b69400934aa9d8fd | 5f06ea565f6d0d555a0034de591c1948b925a7e7 | /blog/views.py | 1cae3ad2d74213e99b7c23fb9a3da2f424d190bb | [] | no_license | cement-hools/blog_by_molchanov | 82ef3385080320b74a1cd9c4c21446d8f0ae60e4 | da0a4c2c083c5c1da0d720a631ae1253792b32be | refs/heads/main | 2023-03-30T08:51:41.100697 | 2021-03-28T02:09:49 | 2021-03-28T02:09:49 | 350,162,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,702 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from django.db.models import Q
from django.shortcuts import render
from django.views.generic import View
from blog.forms import TagForm, PostForm
from blog.models import Post, Tag
from blog.utils import (ObjectDetailMixin, ObjectCreateMixin,
ObjectUpdateMixin, ObjectDelete)
OBJ_IN_PAGE = 3
def posts_list(request):
search_query = request.GET.get('search')
if search_query:
posts = Post.objects.filter(
Q(title__icontains=search_query) |
Q(body__icontains=search_query)
)
else:
posts = Post.objects.all()
paginator = Paginator(posts, OBJ_IN_PAGE)
page_number = request.GET.get('page', 1)
page = paginator.get_page(page_number)
is_paginated = page.has_other_pages()
if page.has_previous():
prev_url = f'?page={page.previous_page_number()}'
else:
prev_url = ''
if page.has_next():
next_url = f'?page={page.next_page_number()}'
else:
next_url = ''
context = {
'page_object': page,
'is_paginated': is_paginated,
'next_url': next_url,
'prev_url': prev_url,
}
return render(request, 'blog/index.html', context)
class PostDetail(ObjectDetailMixin, View):
model = Post
template = 'blog/post_detail.html'
class PostCreate(LoginRequiredMixin, ObjectCreateMixin, View):
model_form = PostForm
template = 'blog/post_create_form.html'
raise_exception = True
class PostUpdate(LoginRequiredMixin, ObjectUpdateMixin, View):
model = Post
model_form = PostForm
template = 'blog/post_update_form.html'
raise_exception = True
class PostDelete(LoginRequiredMixin, ObjectDelete, View):
model = Post
template = 'blog/post_delete_form.html'
redirect_url = 'posts_list_url'
raise_exception = True
class TagDetail(ObjectDetailMixin, View):
model = Tag
template = 'blog/tag_detail.html'
class TagCreate(LoginRequiredMixin, ObjectCreateMixin, View):
model_form = TagForm
template = 'blog/tag_create_form.html'
raise_exception = True
class TagUpdate(LoginRequiredMixin, ObjectUpdateMixin, View):
model = Tag
model_form = TagForm
template = 'blog/tag_update_form.html'
raise_exception = True
class TagDelete(LoginRequiredMixin, ObjectDelete, View):
model = Tag
template = 'blog/tag_delete_form.html'
redirect_url = 'tags_list_url'
raise_exception = True
def tags_list(request):
tags = Tag.objects.all()
context = {
'tags': tags,
}
return render(request, 'blog/tags_list.html', context)
| [
"[email protected]"
] | |
65da08b0f3c75f793eca363ec016e0441370c495 | a47ac7c64cb6bb1f181eadff8e4b24735c19080a | /PythonStudy/9-Tkinter/4-Entry.py | fc6d9a973f75667cf9bcbae7cca69b495df559b5 | [
"MIT"
] | permissive | CoderTitan/PythonDemo | 6dcc88496b181df959a9d43b963fe43a6e4cb032 | feb5ef8be91451b4622764027ac684972c64f2e0 | refs/heads/master | 2020-03-09T09:15:28.299827 | 2018-08-21T03:43:25 | 2018-08-21T03:43:25 | 128,708,650 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # Main window
import tkinter
# Validate the entered text
def validateText():
    text = entry4.get()
    if text != '1':
        print('Correct')
        return True
    print('Wrong')
    return False


def testInvalid():
    print('invalidcommand was called')
    return True


# Create the main window
window = tkinter.Tk()
# Set the title
window.title('Titanjun')
# Set the window size
window.geometry('400x400')

button = tkinter.Button(window, text='Titan', bg='#ff4040')
button.pack()

'''
Entry widget:
used to display simple text content
'''
vari = tkinter.Variable()
entry = tkinter.Entry(window, textvariable=vari)
entry.pack()
# Set the value
vari.set('very good')
# Get the value
print(vari.get())
print(entry.get())

# Read-only entry
vari2 = tkinter.Variable()
entry2 = tkinter.Entry(window, textvariable=vari2, state='disabled')
entry2.pack()
# Set the value
vari2.set('very bad')
print(vari2.get())

# Password entry: whatever is typed is shown as masked characters
vari3 = tkinter.Variable()
entry3 = tkinter.Entry(window, textvariable=vari3, show='@', bg='red', fg='white')
entry3.pack()

# Validate whether the entered content meets the requirement
vari4 = tkinter.Variable()
entry4 = tkinter.Entry(window, textvariable=vari4, validate='key', validatecommand=validateText, invalidcommand=testInvalid)
entry4.pack()

# Enter the main event loop
window.mainloop()
| [
"[email protected]"
] | |
c7224b78c1a6f736145512b1515152716e084fb0 | 7a63ce94e1806a959c9c445c2e0bae95afb760c8 | /tests/user/test_update_credentials.py | 0525fc1882db4236ea941f77e653a698474a366a | [
"MIT"
] | permissive | pklauke/pycamunda | 20b54ceb4a40e836148e84912afd04d78d6ba0ec | 3faac4037212df139d415ee1a54a6594ae5e9ac5 | refs/heads/master | 2023-08-18T10:23:30.503737 | 2022-04-17T18:34:40 | 2022-04-17T18:34:40 | 240,333,835 | 40 | 16 | MIT | 2023-09-12T13:29:08 | 2020-02-13T18:37:25 | Python | UTF-8 | Python | false | false | 2,128 | py | # -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.base
import pycamunda.user
import pycamunda.resource
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_update_credentials_params(engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
assert update_credentials.url == engine_url + '/user/janedoe/credentials'
assert update_credentials.query_parameters() == {}
assert update_credentials.body_parameters() == {
'password': 'password',
'authenticatedUserPassword': 'password'
}
@unittest.mock.patch('requests.Session.request')
def test_update_credentials_calls_requests(mock, engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
update_credentials()
assert mock.called
assert mock.call_args[1]['method'].upper() == 'PUT'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_update_credentials_raises_pycamunda_exception(engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
with pytest.raises(pycamunda.PyCamundaException):
update_credentials()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_update_credentials_raises_for_status(mock, engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
update_credentials()
assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
def test_update_credentials_returns_none(engine_url, update_credentials_input):
update_credentials = pycamunda.user.UpdateCredentials(
url=engine_url, **update_credentials_input
)
result = update_credentials()
assert result is None
| [
"[email protected]"
] | |
b2741fa2aa47d2ca507a4a587d78662b490be852 | b47f2e3f3298388b1bcab3213bef42682985135e | /experiments/jacobi-2d/tmp_files/4634.py | 598e8470565aa941811dde2f95b33c4baece406f | [
"BSD-2-Clause"
] | permissive | LoopTilingBenchmark/benchmark | 29cc9f845d323431e3d40e878cbfc6d1aad1f260 | 52a3d2e70216552a498fd91de02a2fa9cb62122c | refs/heads/master | 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-2d/tmp_files/4634.c')
procedure('kernel_jacobi_2d')
loop(0)
known(' n > 2 ')
tile(0,2,16,2)
tile(0,4,64,4)
tile(1,2,16,2)
tile(1,4,64,4)
| [
"[email protected]"
] | |
b85e6af344facb6e0df6e9ed8dff20da26f7144a | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/merge-strings-alternately.py | 107572aa3949742adfc4813ca836790e9dbcd7cc | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 471 | py | # Time: O(m + n)
# Space: O(1)
class Solution(object):
def mergeAlternately(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: str
"""
result = []
i = 0
while i < len(word1) or i < len(word2):
if i < len(word1):
result.append(word1[i])
if i < len(word2):
result.append(word2[i])
i += 1
return "".join(result)
| [
"[email protected]"
] | |
d3d2478915380b6f8d4f5778c5babd647003d786 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dataartsstudio/huaweicloudsdkdataartsstudio/v1/model/show_instance_result_response.py | 89a066b6d19712691fb0599b6d0fc736ad86c3d5 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,168 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowInstanceResultResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'count': 'int',
'resources': 'list[SubInstanceResult]'
}
attribute_map = {
'count': 'count',
'resources': 'resources'
}
def __init__(self, count=None, resources=None):
"""ShowInstanceResultResponse
The model defined in huaweicloud sdk
        :param count: Total count
:type count: int
:param resources: resources
:type resources: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
super(ShowInstanceResultResponse, self).__init__()
self._count = None
self._resources = None
self.discriminator = None
if count is not None:
self.count = count
if resources is not None:
self.resources = resources
@property
def count(self):
"""Gets the count of this ShowInstanceResultResponse.
总数量
:return: The count of this ShowInstanceResultResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ShowInstanceResultResponse.
        Total count
:param count: The count of this ShowInstanceResultResponse.
:type count: int
"""
self._count = count
@property
def resources(self):
"""Gets the resources of this ShowInstanceResultResponse.
resources
:return: The resources of this ShowInstanceResultResponse.
:rtype: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ShowInstanceResultResponse.
resources
:param resources: The resources of this ShowInstanceResultResponse.
:type resources: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowInstanceResultResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
990724591460f6a3454e06b1a3df500f07f90241 | 58ee1dc37b57e0b4f06cf383c6a9e0654f490150 | /python-tflearn-git/lilac.py | 5214cab05f245eed7f14892fa3df205c85351b16 | [] | no_license | MikeyBaldinger/arch4edu | f3af87ef3a8d4cd78fde7e0ef75658c17dbe8c06 | c1775bf7fe0ffc87f3c8b4109fb1e8acde12a430 | refs/heads/master | 2022-12-23T16:40:55.513537 | 2020-09-28T21:00:59 | 2020-09-28T21:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #!/usr/bin/env python3
from lilaclib import *
maintainers = [{'github': 'petronny', 'email': 'Jingbei Li <[email protected]>'}]
update_on = [{'aur': None}, {'github': 'tflearn/tflearn'}, {'alias': 'python'}]
build_prefix = 'extra-x86_64'
pre_build = aur_pre_build
post_build = aur_post_build
if __name__ == '__main__':
single_main(build_prefix)
| [
"[email protected]"
] | |
54073a0a96169761ca6e309c1f572aa135b71df0 | 682319f56c17e949bab0d6e418838d33977dd760 | /RP/search_element.py | 6bddc659f268253cf4d1a9296c7704a8a0a4f81b | [] | no_license | DilipBDabahde/PythonExample | 8eb70773a783b1f4b6cf6d7fbd2dc1302af8aa1b | 669762a8d9ee81ce79416d74a4b6af1e2fb63865 | refs/heads/master | 2020-08-23T01:05:44.788080 | 2020-07-25T21:59:52 | 2020-07-25T21:59:52 | 216,511,985 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | '''
Write a program which accepts N numbers from the user and stores them in a list. Accept another number from the user and
return the frequency of that number in the list.
input: Num of elements: 12
input Elements: 5 8 6 8 5 9 3 7 2 21 1 5
Element to search = 5
output: Freq of search element is: 3
'''
def search_Element(arr, iNo):
    if len(arr) == 0:   # an empty list can never contain the element
        return -1;
    icnt = 0; # icnt is a counter variable, incremented by one each time the element is found
for i in range(0, len(arr)):
if arr[i] == iNo:
icnt = icnt + 1;
return icnt;
def main():
arr_list = list(); # arr_list is object of list class , this object is used to add elements in it
size = input("Enter list size: ");
size = int(size); # type conversion of size variable str to int
print("Enter elements for list");
for i in range(0, size):
no = input("Enter element: ");
no = int(no); # type conversion
arr_list.append(no); # appending element to list class object
#now our list is created using loop iteration
print("Created list is: ",arr_list);
search_var = input("Enter number to search its freq:");
search_var = int(search_var);
    result = search_Element(arr_list, search_var);
    if result > 0 :
        print("Freq of given element in list is: ",result);
    elif result == 0:
        print("Element is not present in the list");
    else:
        print("Invalid input");
if __name__ == "__main__":
main();
| [
"[email protected]"
] | |
34e6d9bd427d80013aeb40dfba6f4734f2d186e4 | e6bc1f55371786dad70313eb468a3ccf6000edaf | /Datasets/py-if-else/Correct/076.py | 2c07f238adcfd70b429c52cda3509dc1a5eb15ba | [] | no_license | prateksha/Source-Code-Similarity-Measurement | 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | fb371b837917794d260a219a1ca09c46a5b15962 | refs/heads/master | 2023-01-04T07:49:25.138827 | 2020-10-25T14:43:57 | 2020-10-25T14:43:57 | 285,744,963 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | #!/bin/python3
import sys
N = int(input().strip())
if N % 2 == 0:
    if N < 5 or N > 20:
        print('Not Weird')
    else:
        print("Weird")
else:
    print("Weird") | [
"[email protected]"
] | |
850985fddff858e55bfd488b48ba7aff47e39da6 | fbf73800e27f66960f677a284c2771e66708973b | /subreview_lib/classicreviewdecisionpage.py | dfbc360e28ba50b5a16d59e1c83ece7bce6d2c65 | [
"MIT"
] | permissive | allankellynet/mimas | 94140a341693d4729b3cdf5ea94ef2f7e550aad6 | 10025d43bba9e84f502a266760786842e7158a05 | refs/heads/master | 2022-05-30T21:35:06.083902 | 2020-02-27T14:04:27 | 2020-02-27T14:04:27 | 235,146,506 | 0 | 0 | MIT | 2022-05-25T04:56:13 | 2020-01-20T16:30:39 | Python | UTF-8 | Python | false | false | 3,568 | py | #-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# System imports
# Google imports
import logging
from google.appengine.ext import ndb
# Local imports
import roundreviews
import basehandler
from submission_lib import submissionrecord
class ClassicReviewDecisionPage(basehandler.BaseHandler):
def make_page(self, crrt_conf):
review_round = int(self.request.get("round"))
tracks = crrt_conf.mapped_track_obects()
crrt_track = self.request.get("track", default_value=tracks.keys()[0])
submissions = self.sorted_submissions(crrt_conf, crrt_track, review_round)
template_values = {
'crrt_conf': crrt_conf,
"track_objects": tracks,
"crrt_track": crrt_track,
"submissions": submissions,
"submissions_len": len(submissions),
"decisions": submissionrecord.get_decision_summary(crrt_conf.key, crrt_track, review_round),
"decision_maker": crrt_conf.user_rights().has_decision_right_for_round(
self.get_crrt_user().email(), review_round),
"review_round": review_round,
"track_slots": crrt_conf.mapped_track_obects()[crrt_track].slots,
}
self.write_page('subreview_lib/classicreviewdecisionpage.html', template_values)
def sorted_submissions(self, crrt_conf, crrt_track, review_round):
submissions = submissionrecord.retrieve_conference_submissions_by_track_and_round(
crrt_conf.key, crrt_track, review_round)
if self.request.params.has_key("mean"):
sorted = submissionrecord.sort_submissions_by_mean_high_to_low(submissions, review_round)
else:
sorted = submissionrecord.sort_submissions_by_total_high_to_low(submissions, review_round)
return sorted
def get(self):
if not (self.session.has_key("crrt_conference")):
logging.debug("Conference key session variable missing")
return
crrt_conf = ndb.Key(urlsafe=self.session["crrt_conference"]).get()
self.make_page(crrt_conf)
def submit_decisions(self, review_round):
if not (self.session.has_key("crrt_conference")):
logging.debug("Conference key session variable missing")
return
roundreviews.submit_decisions(
ndb.Key(urlsafe=self.session["crrt_conference"]),
self.request.get("tracklist"),
review_round,
self.request)
def decline_no_decisions(self, review_round):
self.submit_decisions(review_round)
roundreviews.mass_track_change(
ndb.Key(urlsafe=self.session["crrt_conference"]),
self.request.get("tracklist"),
review_round,
"No decision",
"Decline")
def post(self):
review_round = int(self.request.get("review_round"))
if self.request.get("SubmitDecision"):
self.submit_decisions(review_round)
if self.request.get("DeclineNoDecisions"):
self.decline_no_decisions(review_round)
self.redirect("/classic_review_decisions?track=" +
self.request.get("tracklist") +
"&round=" + str(review_round))
| [
"[email protected]"
] | |
9ba77a93b34b31c7c184176444d94a568deb7688 | e4d4149a717d08979953983fa78fea46df63d13d | /Week5/Day1/XP.py | 2c661c7abdf2c8897ce0f26c7fbce353061e2d6f | [] | no_license | fayblash/DI_Bootcamp | 72fd75497a2484d19c779775c49e4306e602d10f | a4e8f62e338df5d5671fd088afa575ea2e290837 | refs/heads/main | 2023-05-05T20:55:31.513558 | 2021-05-27T06:48:40 | 2021-05-27T06:48:40 | 354,818,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,863 | py | # # Exercise 1: Cats
# # Instructions
# # Using this class
# class Cat:
# def __init__(self, name, age):
# self.name = name
# self.age = age
# def oldest_cat(cat_list):
# oldest_current=cat_list[0]
# for cat in cat_list:
# if cat.age>oldest_current.age:
# oldest_current=cat
# return oldest_current
# # Instantiate three Cat objects using the code provided above.
# c1=Cat("Roxy",3)
# c2=Cat("Meow",2)
# c3=Cat("Fluffy",4)
# # Outside of the class, create a function that finds the oldest cat and returns the cat.
# all_cats=[c1,c2,c3]
# oldest=oldest_cat(all_cats)
# print(f"{oldest.name} is the oldest cat and she is {oldest.age} years old.")
# # Print the following string: “The oldest cat is <cat_name>, and is <cat_age> years old.”. Use the function previously created.
# # Exercise 2 : Dogs
# # Instructions
# # Create a class called Dog.
# class Dog:
# def __init__(self,name,height):
# self.name=name
# self.height=height
# def bark(self):
# print(f"{self.name} goes woof!")
# # In this class, create an __init__ method that takes two parameters : name and height. This function instantiates two attributes, which values are the parameters.
# # Create a method called bark that prints the following string “<dog_name> goes woof!”.
# # Create a method called jump that prints the following string “<dog_name> jumps <x> cm high!”. x is the height*2.
# def jump(self):
# print(f"{self.name} jumps {self.height*2} cm")
# # Outside of the class, create an object called davids_dog. His dog’s name is “Rex” and his height is 50cm.
# davids_dog=Dog("Rex",50)
# print(davids_dog.name)
# print(davids_dog.height)
# davids_dog.bark()
# davids_dog.jump()
# # Print the details of his dog (ie. name and height) and call the methods bark and jump.
# # Create an object called sarahs_dog. Her dog’s name is “Teacup” and his height is 20cm.
# sarahs_dog=Dog("Teacup",20)
# print(sarahs_dog.name)
# print(sarahs_dog.height)
# sarahs_dog.bark()
# sarahs_dog.jump()
# # Print the details of her dog (ie. name and height) and call the methods bark and jump.
# # Create an if statement outside of the class to check which dog is bigger. Print the name of the bigger dog.
# if sarahs_dog.height>davids_dog.height:
# print(f"{sarahs_dog.name} is bigger.")
# else:
# print(f"{davids_dog.name} is bigger.")
# # Exercise 3 : Who’s The Song Producer?
# # Instructions
# # Define a class called Song, it will show the lyrics of a song.
# class Song:
# def __init__(self,lyrics):
# self.lyrics=lyrics
# # Its __init__() method should have two arguments: self and lyrics (a list).
# # Inside your class create a method called sing_me_a_song that prints each element of lyrics on its own line.
# def sing_me_a_song(self):
# for lyric in self.lyrics:
# print(lyric)
# # Create an object, for example:
# stairway= Song(["There’s a lady who's sure","all that glitters is gold", "and she’s buying a stairway to heaven"])
# # Then, call the sing_me_a_song method. The output should be:
# stairway.sing_me_a_song()
# # There’s a lady who's sure
# # all that glitters is gold
# # and she’s buying a stairway to heaven
# Exercise 4 : Afternoon At The Zoo
# Instructions
# Create a class called Zoo.
class Zoo:
def __init__(self,zoo_name):
self.zoo_name=zoo_name
self.animals=[]
self.list_animals=[]
# In this class create a method __init__ that takes one parameter: zoo_name.
# It instantiates two attributes: animals (an empty list) and name (name of the zoo).
# Create a method called add_animal that takes one parameter new_animal. This method adds the new_animal to the animals list as long as it isn’t already in the list.
def add_animal(self,new_animal):
if new_animal not in self.animals:
self.animals.append(new_animal)
# Create a method called get_animals that prints all the animals of the zoo.
def get_animals(self):
print(self.animals)
# Create a method called sell_animal that takes one parameter animal_sold. This method removes the animal from the list and of course the animal needs to exist in the list.
def sell_animal(self,animal_sold):
if animal_sold in self.animals:
self.animals.remove(animal_sold)
# Create a method called sort_animals that sorts the animals alphabetically and groups them together based on their first letter.
# Example
def sort_animals(self):
        self.animals = sorted(self.animals)
        temp_list = [self.animals[0]]
        for i in range(1, len(self.animals)):
            if self.animals[i][0] == temp_list[-1][0]:
                temp_list.append(self.animals[i])
            else:
                self.list_animals.append(temp_list)
                temp_list = [self.animals[i]]
        self.list_animals.append(temp_list)  # keep the final group; it was silently dropped before
        return {v + 1: k for v, k in enumerate(self.list_animals)}
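
    # An equivalent grouping sketch with itertools.groupby, shown for comparison
    # (not used above):
    #
    #     from itertools import groupby
    #     groups = [list(g) for _, g in groupby(sorted(self.animals), key=lambda a: a[0])]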
def get_groups(self):
for i in self.list_animals:
print(i)
fays_zoo=Zoo("fay")
fays_zoo.add_animal("Bear")
fays_zoo.add_animal("Ape")
fays_zoo.add_animal("Cat")
fays_zoo.add_animal("Emu")
fays_zoo.add_animal("Cougar")
fays_zoo.add_animal("Eel")
fays_zoo.add_animal("Baboon")
fays_zoo.get_animals()
print(fays_zoo.sort_animals())
fays_zoo.get_groups()
# {
# 1: "Ape",
# 2: ["Baboon", "Bear"],
# 3: ['Cat', 'Cougar'],
# 4: ['Eel', 'Emu']
# }
# Create a method called get_groups that prints the animal/animals inside each group.
#
# Create an object called ramat_gan_safari and call all the methods.
# Tip: The zookeeper is the one who will use this class.
# Example
# Which animal should we add to the zoo --> Giraffe
| [
"[email protected]"
] | |
d3a3564a7a3dfa3476aed8c37bc0eefe96e862bd | b54f9fb585648e4fe0b8ca727f42c97a6c1486fd | /variability/varModels.py | 5006e96121da25ab9e80c656b99ff8bf870894bf | [] | no_license | tribeiro/SMAPS | 46a36ab3fd74e35d97d9b43d5d80d88d9581b9da | b1e8dd9444e7fcbc7a82ab30941bab224b5ae600 | refs/heads/master | 2021-01-22T22:35:09.228649 | 2014-05-08T11:19:47 | 2014-05-08T11:19:47 | 19,570,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,107 | py |
import numpy as np
###################################################################################################
def ecbinary(time,period,ecduration,depth):
'''
Simulate eclipsing binary.
'''
phase = time / period
cycle = np.ceil(phase)
phase = phase - cycle
mask = np.bitwise_and(phase > -ecduration, phase < ecduration)
flux = np.zeros_like(time)+1.0
flux[mask] -= depth
return flux
###################################################################################################
def pulsating(time,period,amplitude):
'''
Simulate pulsating star.
'''
    return amplitude*np.sin(2*np.pi*time/period)
###################################################################################################
def transient(time,t0,amplitude,duration):
flux = np.zeros_like(time)
mask = time > t0
flux[mask] += amplitude * np.exp(- ((time[mask]-t0) / duration)**2.)
return flux
###################################################################################################
###################################################################################################
if __name__ == '__main__':
import pylab as py
tt = np.arange(10,40,0.1)
#tobs = np.loadtxt( '/Users/tiago/Documents/JPAS/variables/filtersObservations.txt',
# delimiter=',',unpack=True,usecols=(1,))
mag0 = 16
ectobs = np.array([17.0413348326,17.0480014993,26.1886086683,30.3348673002])+np.random.random(1)[0]*10-5
ectobs.sort()
ecflx = mag0-ecbinary(tt,2,0.1,1.5)
ecobs = mag0-ecbinary(ectobs,2,0.1,1.5)
ecerr = np.random.exponential(0.1,len(ectobs)) * (-1)**np.random.randint(0,2,len(ectobs))
pltobs = np.array([17.0413348326,17.0480014993,26.1886086683,30.3348673002])+np.random.random(1)[0]*10-5
pltobs.sort()
plflx = mag0-pulsating(tt,2,0.5)
plobs = mag0-pulsating(pltobs,2,0.5)
plerr = np.random.exponential(0.1,len(pltobs)) * (-1)**np.random.randint(0,2,len(pltobs))
trtobs = np.array([17.0413348326,17.0480014993,26.1886086683,30.3348673002])+np.random.random(1)[0]*10-5
trtobs.sort()
trflx = mag0-transient(tt,20,1.0,10)+transient(tt,600,10.0,40)
trobs = mag0-transient(trtobs,20,1.0,10)+transient(trtobs,600,10.0,40)
trerr = np.random.exponential(0.1,len(trtobs)) * (-1)**np.random.randint(0,2,len(trtobs))
py.figure(1,figsize=(8,4))
########################
ax1 = py.subplot(311)
py.plot(tt,ecflx,'-')
py.errorbar(ectobs,ecobs+ecerr,0.1,fmt='o')
py.ylim(17.499,14.5)
ax2 = py.subplot(312)
py.plot(tt,plflx,'-')
py.errorbar(pltobs,plobs+plerr,0.1,fmt='o')
py.ylim(17.5,14.5)
ax3 = py.subplot(313)
py.plot(tt,trflx,'-')
py.errorbar(trtobs,trobs+trerr,0.1,fmt='o')
py.ylim(17.5,14.501)
########################
py.setp(ax1.get_xticklabels(),visible=False)
py.setp(ax2.get_xticklabels(),visible=False)
ax3.set_xlabel('Time (days)')
ax2.set_ylabel('Magnitude')
py.subplots_adjust(hspace=0,wspace=0,bottom=0.13,top=0.93)
#py.savefig('/Users/tiago/Dropbox/Apps/TeX Writer (1)/fig/jpas_variability_fig01.pdf')
py.show()
################################################################################################### | [
"[email protected]"
] | |
5694f828530a430b4aca5569f67e50d0baf88575 | aff694b019806db8f8cd66fd205f9049351bb10c | /bin/wheel | e54d9f83eb92ea97085a22f82f854bd08e745464 | [] | no_license | mikilabarda/my-first-blog | 3885d08f87e9c3f05da7000b9e60d29f3895efd3 | 7e1476fa75e6db95bfe8685ad43a233777166071 | refs/heads/master | 2021-05-30T19:25:38.022284 | 2016-03-20T05:31:16 | 2016-03-20T05:31:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | #!/Users/Miki/Desktop/env/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
e3399daf37f287b2f7c0b62e55f30e6611bf5d97 | 0f89043a9e7caac53bc76cd359d704d5cfaef3db | /main/migrations/0044_remove_tag_resources.py | eaef56cf0970beb2e07945c8e6a10d9b814acaf4 | [] | no_license | sirodoht/knowhub | f704d987f6c800717c2dba7b811d05b0d85801fd | 4c242a9f1bc14a11fbf799119b19d79c4201ba2d | refs/heads/master | 2022-03-05T15:28:55.539951 | 2019-11-18T18:33:42 | 2019-11-18T18:33:42 | 134,064,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # Generated by Django 2.1 on 2018-08-27 13:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("main", "0043_auto_20180826_0050")]
operations = [migrations.RemoveField(model_name="tag", name="resources")]
| [
"[email protected]"
] | |
29b9663bb72f21946ffdb20c501c498e7c0cfee6 | f2e09eea7c995df2cac15f16ae5eeb79b6fc748c | /odmltools/info.py | cda1f635ddc615fb0e6c0b291916d4bb2d05c164 | [
"BSD-2-Clause"
] | permissive | mpsonntag/odmltools | 676d829212ababd3ea3eb3396f25d0df8f3a4373 | 87e67fc737fbad2bd9866d529d47abbc2b7115d1 | refs/heads/master | 2021-07-13T07:54:23.214505 | 2021-06-21T18:11:19 | 2021-06-21T18:11:19 | 221,953,387 | 0 | 0 | null | 2019-11-15T15:40:14 | 2019-11-15T15:40:14 | null | UTF-8 | Python | false | false | 357 | py | import os
import json
INSTALL_PATH = os.path.dirname(__file__)
with open(os.path.join(INSTALL_PATH, "info.json")) as infofile:
infodict = json.load(infofile)
VERSION = infodict["VERSION"]
AUTHOR = infodict["AUTHOR"]
COPYRIGHT = infodict["COPYRIGHT"]
CONTACT = infodict["CONTACT"]
HOMEPAGE = infodict["HOMEPAGE"]
CLASSIFIERS = infodict["CLASSIFIERS"]
| [
"[email protected]"
] | |
7acf5941940c678da4795277f2ddd08749ad98a3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03192/s975847643.py | 0b87008f474274d7ec53b07ee4ec58d374c6d871 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | n = int(input())
li = []
while n > 0:
li.append(n%10)
n //= 10
li.reverse()
ans = 0
for i in range(len(li)):
if li[i] == 2:
ans += 1
print(ans) | [
"[email protected]"
] | |
8845672ea92d7bddefee80d4f9a40864a8f36823 | bb198232df12a1adb9e8a6164ff2a403bf3107cf | /cookie-monster/MonsterBrowser.py | da21df7b654d09e613e51a1984046a21401e3364 | [] | no_license | vanello/wifi-arsenal | 9eb79a43dfdd73d3ead1ccd5d2caf9bad9e327ee | 1ca4c5a472687f8f017222893f09a970652e9a51 | refs/heads/master | 2021-01-16T22:00:37.657041 | 2015-09-03T03:40:43 | 2015-09-03T03:40:43 | 42,060,303 | 1 | 0 | null | 2015-09-07T15:24:11 | 2015-09-07T15:24:11 | null | UTF-8 | Python | false | false | 3,179 | py | from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from PyQt4.QtNetwork import *
import getopt
import sys
import re
class MyBrowser(QWebView):
def __init__(self,father=None):
super(MyBrowser, self).__init__(father)
self.page().setLinkDelegationPolicy(QWebPage.DelegateExternalLinks)
self.connect(self, SIGNAL("linkClicked(QUrl)"), self.onLinkClicked)
def onLinkClicked(self, url):
self.load(url)
class MonsterWindow(QWidget):
def __init__(self, father = None):
super(MonsterWindow, self).__init__(father)
class MonsterBrowser():
urlPat = re.compile("https?://([^/]*)(.*)")
def usage(self):
print """
Usage: python MonsterBrowser.py [options] url
Options:
-c --cookie <Cookie> set cookie
-u --useragent <UserAgent> set useragent
"""
def parseArguments(self, argv):
try:
opts, args = getopt.getopt(argv, "c:u:", ["cookie=", "useragent="])
except getopt.GetoptError:
self.usage()
sys.exit(2)
if len(args) < 1:
self.usage()
sys.exit(2)
url = args[0]
cookie = None
useragent = None
for opt, args in opts:
if opt in ("-c", "--cookie"):
cookie = args
if opt in ("-u", "--useragent"):
useragent = args
if useragent is None:
useragent = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1"
print cookie, useragent, url
self.launch(cookie, useragent, url)
def launch(self, rawcookie, useragent, url):
'''
url: http://xxx.yyy.zzz/aaa/bbb?ccc/
host: xxx.yyy.zzz
domain: yyy.zzz
'''
cookies = []
# if no http protocol header, append it
if not url.startswith("http://"):
url = "http://" + url
match = self.urlPat.match(url)
host = match.group(1)
uri = match.group(2)
domain = ".".join(host.split(".")[-2:])
# adding cookies to cookiejar
for cookie in rawcookie.split(";"):
qnc = QNetworkCookie()
qnc.setDomain("."+domain)
key = cookie.split("=")[0]
value = "=".join(cookie.split("=")[1:])
qnc.setName(key)
qnc.setValue(value)
cookies.append(qnc)
self.open_web(url, cookies, useragent)
return
def open_web(self, url, cookies, useragent):
app = QApplication(sys.argv)
wind = QMainWindow()
view = MyBrowser()
nam = QNetworkAccessManager()
view.page().setNetworkAccessManager(nam)
print " [!] Spawning web view of " + url
ncj = QNetworkCookieJar()
ncj.setAllCookies(cookies)
nam.setCookieJar(ncj)
qnr = QNetworkRequest(QUrl(url))
qnr.setRawHeader("User-Agent", useragent)
view.load(qnr)
wind.setCentralWidget(view)
wind.show()
app.exec_()
if __name__ == "__main__":
browser = MonsterBrowser()
browser.parseArguments(sys.argv[1:])
| [
"[email protected]"
] | |
b50ab0437bdeb0851adabcf7abdab17632f1e3ef | 82b495a208ebdeb71314961021fbfe767de57820 | /chapter-06/temperature.py | 6c3619f4fe12c91df242c2a86240bd498aa1abd1 | [
"MIT"
] | permissive | krastin/pp-cs3.0 | 7c860794332e598aa74278972d5daa16853094f6 | 502be9aac2d84215db176864e443c219e5e26591 | refs/heads/master | 2020-05-28T02:23:58.131428 | 2019-11-13T13:06:08 | 2019-11-13T13:06:08 | 188,853,205 | 0 | 0 | MIT | 2019-11-13T13:06:09 | 2019-05-27T13:56:41 | Python | UTF-8 | Python | false | false | 469 | py | def convert_to_celsius(fahrenheit: float) -> float:
"""Return the number of Celsius degrees equivalent to fahrenheit
degrees.
>>> convert_to_celsius(75)
23.88888888888889
"""
return (fahrenheit - 32.0) * 5.0 / 9.0
def above_freezing(celsius: float) -> bool:
"""Return true if the temperature in celsius degrees is above freezing
>>> above_freezing(5.2)
True
>>> above_freezing(-2)
False
"""
return celsius > 0
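
# Not part of the original module: a minimal runner (an assumption, not the
# author's code) so the doctest examples in the docstrings above can be
# exercised directly, e.g. `python temperature.py -v`.
if __name__ == '__main__':
    import doctest
    doctest.testmod()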
| [
"[email protected]"
] | |
65db9b7872898345eee84550ab79aa3f9bbe16ab | 6ed034d0a5e239d7b0c528b287451409ffb4a494 | /mmpose/datasets/samplers/__init__.py | da09effaf20fefe1a102277672b98db7d884f002 | [
"Apache-2.0"
] | permissive | ViTAE-Transformer/ViTPose | 8f9462bd5bc2fb3e66de31ca1d03e5a9135cb2bf | d5216452796c90c6bc29f5c5ec0bdba94366768a | refs/heads/main | 2023-05-23T16:32:22.359076 | 2023-03-01T06:42:22 | 2023-03-01T06:42:22 | 485,999,907 | 869 | 132 | Apache-2.0 | 2023-03-01T06:42:24 | 2022-04-27T01:09:19 | Python | UTF-8 | Python | false | false | 134 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .distributed_sampler import DistributedSampler
__all__ = ['DistributedSampler']
| [
"[email protected]"
] | |
a12343947c99a0584b18996596487918113884d1 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1360455/snippet.py | ff62eb5f770ed285b9b8fdc6e6f331c6b6e4e651 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 3,727 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Done under Visual Studio 2010 using the excelent Python Tools for Visual Studio
# http://pytools.codeplex.com/
#
# Article on ideas vs execution at: http://blog.databigbang.com/ideas-and-execution-magic-chart/
import urllib2
import json
from datetime import datetime
from time import mktime
import csv
import codecs
import cStringIO
class CSVUnicodeWriter: # http://docs.python.org/library/csv.html
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
def get_hackernews_articles_with_idea_in_the_title():
endpoint = 'http://api.thriftdb.com/api.hnsearch.com/items/_search?filter[fields][title]=idea&start={0}&limit={1}&sortby=map(ms(create_ts),{2},{3},4294967295000)%20asc'
incomplete_iso_8601_format = '%Y-%m-%dT%H:%M:%SZ'
items = {}
start = 0
limit = 100
begin_range = 0
end_range = 0
url = endpoint.format(start, limit, begin_range, str(int(end_range)))
response = urllib2.urlopen(url).read()
data = json.loads(response)
prev_timestamp = datetime.fromtimestamp(0)
results = data['results']
while results:
for e in data['results']:
_id = e['item']['id']
title = e['item']['title']
points = e['item']['points']
num_comments = e['item']['num_comments']
timestamp = datetime.strptime(e['item']['create_ts'], incomplete_iso_8601_format)
            #if timestamp < prev_timestamp: # The results are not correctly sorted. We can't rely on this one.
            if _id in items: # If the circle is complete.
                return items
            prev_timestamp = timestamp

            items[_id] = {'id':_id, 'title':title, 'points':points, 'num_comments':num_comments, 'timestamp':timestamp}

            title_utf8 = title.encode('utf-8')
            print title_utf8, timestamp, _id, points, num_comments

        start += len(results)
        if start + limit > 1000:
            start = 0
            end_range = mktime(timestamp.timetuple())*1000

        url = endpoint.format(start, limit, begin_range, str(int(end_range))) # if not str(int(x)) then a float gives in the sci math form: '1.24267528e+12'
response = urllib2.urlopen(url).read()
data = json.loads(response)
results = data['results']
return items
if __name__ == '__main__':
items = get_hackernews_articles_with_idea_in_the_title()
with open('hn-articles.csv', 'wb') as f:
hn_articles = CSVUnicodeWriter(f)
hn_articles.writerow(['ID', 'Timestamp', 'Title', 'Points', '# Comments'])
for k,e in items.items():
hn_articles.writerow([str(e['id']), str(e['timestamp']), e['title'], str(e['points']), str(e['num_comments'])])
# It returns 3706 articles where the query says that they are 3711... find the bug... | [
"[email protected]"
] | |
9e427939fee2e4d3f52f2a70e6743b49bcc4d34e | cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101 | /st2tests/integration/mistral/test_errors.py | 3280859646406164d582cf4022c8c414ea41ca1f | [
"Apache-2.0"
] | permissive | Junsheng-Wu/st2 | 6451808da7de84798641882ca202c3d1688f8ba8 | c3cdf657f7008095f3c68b4132b9fe76d2f52d81 | refs/heads/master | 2022-04-30T21:32:44.039258 | 2020-03-03T07:03:57 | 2020-03-03T07:03:57 | 244,301,363 | 0 | 0 | Apache-2.0 | 2022-03-29T22:04:26 | 2020-03-02T06:53:58 | Python | UTF-8 | Python | false | false | 6,469 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from integration.mistral import base
class ExceptionHandlingTest(base.TestWorkflowExecution):
def test_bad_workflow(self):
with self.assertRaises(Exception) as t:
self._execute_workflow('examples.mistral-foobar', {})
self.assertIn('Action "examples.mistral-foobar" cannot be found', t.exception.message)
def test_bad_action(self):
execution = self._execute_workflow('examples.mistral-error-bad-action', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution)
self.assertIn('Failed to find action', execution.result['extra']['state_info'])
def test_bad_wf_arg(self):
execution = self._execute_workflow('examples.mistral-error-bad-wf-arg', {})
execution = self._wait_for_completion(
execution,
expect_tasks=False,
expect_tasks_completed=False
)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Invalid input', execution.result['extra']['state_info'])
def test_bad_task_transition(self):
execution = self._execute_workflow('examples.mistral-error-bad-task-transition', {})
execution = self._wait_for_completion(
execution,
expect_tasks=False,
expect_tasks_completed=False
)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn("Task 'task3' not found", execution.result['error'])
def test_bad_with_items(self):
execution = self._execute_workflow('examples.mistral-error-bad-with-items', {})
execution = self._wait_for_completion(execution, expect_tasks=False)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Wrong input format', execution.result['extra']['state_info'])
def test_bad_expr_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-expr', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_publish_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-publish', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_subworkflow_input_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-subworkflow-input', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_task_transition_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-task-transition', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_with_items_yaql(self):
execution = self._execute_workflow('examples.mistral-test-yaql-bad-with-items', {})
execution = self._wait_for_completion(execution, expect_tasks=False)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate YAQL expression', execution.result['extra']['state_info'])
def test_bad_expr_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-expr', {})
execution = self._wait_for_completion(execution, expect_tasks=False)
self._assert_failure(execution, expect_tasks_failure=False)
# TODO: Currently, Mistral returns "UndefinedError ContextView object has no attribute".
        #       Need to fix Mistral to return "Can not evaluate Jinja expression."
# self.assertIn('Can not evaluate Jinja expression',
# execution.result['extra']['state_info'])
def test_bad_publish_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-publish', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate Jinja expression', execution.result['extra']['state_info'])
def test_bad_subworkflow_input_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-subworkflow-input', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate Jinja expression', execution.result['extra']['state_info'])
def test_bad_task_transition_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-task-transition', {})
execution = self._wait_for_completion(execution)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate Jinja expression', execution.result['extra']['state_info'])
def test_bad_with_items_jinja(self):
execution = self._execute_workflow('examples.mistral-test-jinja-bad-with-items', {})
execution = self._wait_for_completion(execution, expect_tasks=False)
self._assert_failure(execution, expect_tasks_failure=False)
self.assertIn('Can not evaluate Jinja expression', execution.result['extra']['state_info'])
| [
"[email protected]"
] | |
23c0dd25543411644e979a4ed4368b85c6f49098 | 4dbaea97b6b6ba4f94f8996b60734888b163f69a | /LeetCode/8.py | 15f59ed2df0e448995c3a574ba4fa386c04f4725 | [] | no_license | Ph0en1xGSeek/ACM | 099954dedfccd6e87767acb5d39780d04932fc63 | b6730843ab0455ac72b857c0dff1094df0ae40f5 | refs/heads/master | 2022-10-25T09:15:41.614817 | 2022-10-04T12:17:11 | 2022-10-04T12:17:11 | 63,936,497 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
        minus = 1  # flips to -1 once a leading '+' or '-' has been consumed
if len(str) == 0:
return 0
str = str.strip()
i = 0
while i < len(str):
if i == 0 and (str[i] == '-' or str[i] == '+') and minus == 1:
minus = -1
elif str[i] not in ['0','1','2','3','4','5','6','7','8','9']:
break
i += 1
if i == 0 or (i == 1 and minus == -1):
return 0
        res = int(str[0:i])          # str[0:i] still includes any sign, so int() parses it
        res = min(res, 2147483647)   # clamp to signed 32-bit INT_MAX
        res = max(res, -2147483648)  # ...and INT_MIN
return res | [
"[email protected]"
] | |
1f29a592c39022e79242a176b8638f31728d0fba | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_207/190.py | 4ea85e66ef60f663dfa02f1f700dbd13bd15454c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from heapq import *
def read_ints():
return list(map(int, input().split()))
def solve(t):
N, r, o, y, g, b, v = read_ints()
if r == g != 0:
if o or y or b or v:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'RG'*r))
return
if y == v != 0:
if r or o or g or b:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'VY'*y))
return
if b == o != 0:
if r or y or g or v:
print('Case #{}: IMPOSSIBLE'.format(t))
else:
print('Case #{}: {}'.format(t, 'OB'*b))
return
r -= g
y -= v
b -= o
if r < 0 or y < 0 or b < 0:
print('Case #{}: IMPOSSIBLE'.format(t))
return
M = max(r, y, b)
h = [(-r, r != M, 'R'), (-y, y != M, 'Y'), (-b, b != M, 'B')]
heapify(h)
res = ''
count, _prio, ch = heappop(h)
while count < 0:
res += ch
count, _prio, ch = heapreplace(h, (count + 1, _prio, ch))
if res[-1] != res[0] and all(count == 0 for count, *_ in h):
res = res.replace('R', 'RG'*g + 'R', 1)
res = res.replace('Y', 'YV'*v + 'Y', 1)
res = res.replace('B', 'BO'*o + 'B', 1)
print('Case #{}: {}'.format(t, res))
else:
print('Case #{}: IMPOSSIBLE'.format(t))
if __name__ == "__main__":
for t in range(1, int(input())+1):
solve(t)
| [
"[email protected]"
] | |
fd95d5fbefacb5b37e09b549986f43d521ae44a2 | 21fec19cb8f74885cf8b59e7b07d1cd659735f6c | /chapter_8/dlg-custom.py | b1338fb1bb4b149b6737cc31b65a691d7ecc67ba | [
"MIT"
] | permissive | bimri/programming_python | ec77e875b9393179fdfb6cbc792b3babbdf7efbe | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | refs/heads/master | 2023-09-02T12:21:11.898011 | 2021-10-26T22:32:34 | 2021-10-26T22:32:34 | 394,783,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | "Custom Dialogs"
'''
Custom dialogs support arbitrary interfaces, but they are also the most complicated to
program. Even so, there’s not much to it—simply create a pop-up window as a
Toplevel with attached widgets, and arrange a callback handler to fetch user inputs
entered in the dialog (if any) and to destroy the window.
'''
import sys
from tkinter import *
makemodal = (len(sys.argv) > 1)
def dialog():
win = Toplevel() # make a new window
Label(win, text='Hard drive reformatted!').pack() # add a few widgets
Button(win, text='OK', command=win.destroy).pack() # set destroy callback
if makemodal:
win.focus_set() # take over input focus,
win.grab_set() # disable other windows while I'm open,
        win.wait_window()                                # and wait here until win destroyed
print('dialog exit') # else returns right away
root = Tk()
Button(root, text='popup', command=dialog).pack()
root.mainloop()
'''
Because dialogs are nonmodal in this mode, the
root window remains active after a dialog is popped up. In fact, nonmodal dialogs never
block other windows, so you can keep pressing the root’s button to generate as many
copies of the pop-up window as will fit on your screen.
'''
| [
"[email protected]"
] | |
1ced0778202d32bf5b35354803964d6939afc6ea | 9ac35a2327ca9fddcf55077be58a1babffd23bdd | /cadence/tests/test_errors.py | 6921b0a8d11e06f2d032e6cc1b4e6d0ef653cd7c | [
"MIT"
] | permissive | meetchandan/cadence-python | f1eb987c135f620607a62495096a89494216d847 | cfd7a48e6da7c289c9ae0c29c94d12d2b05986e4 | refs/heads/master | 2022-12-14T12:46:32.364375 | 2020-09-16T15:50:55 | 2020-09-16T15:50:55 | 260,763,097 | 1 | 0 | MIT | 2020-09-16T15:48:14 | 2020-05-02T19:47:56 | Python | UTF-8 | Python | false | false | 1,347 | py | from unittest import TestCase
from cadence.errors import find_error, InternalServiceError, WorkflowExecutionAlreadyStartedError
from cadence.thrift import cadence_thrift
class TestError(TestCase):
def setUp(self) -> None:
self.internalServiceError = cadence_thrift.shared.InternalServiceError("ERROR")
self.sessionAlreadyExistError = cadence_thrift.shared.WorkflowExecutionAlreadyStartedError("ERROR", "REQUEST-ID",
"RUN-ID")
def test_internal_server_error(self):
response = cadence_thrift.WorkflowService.StartWorkflowExecution.response(
internalServiceError=self.internalServiceError)
error = find_error(response)
self.assertIsInstance(error, InternalServiceError)
self.assertEqual("ERROR", error.message)
def test_session_already_exists_error(self):
response = cadence_thrift.WorkflowService.StartWorkflowExecution.response(
sessionAlreadyExistError=self.sessionAlreadyExistError)
error = find_error(response)
self.assertIsInstance(error, WorkflowExecutionAlreadyStartedError)
self.assertEqual("ERROR", error.message)
self.assertEqual("REQUEST-ID", error.start_request_id)
self.assertEqual("RUN-ID", error.run_id)
| [
"[email protected]"
] | |
47dd4b0d0b97967cfa1f6829d045d33383c9b932 | 96796bca1f00c5af89c695ff51691e977fda262c | /myEnvironments/multipleApps/multipleApps/urls.py | 1e1daa59868f00d10f30a34bb8adb6c29c2d563a | [] | no_license | LexiPearl/Python-Projects | 5be7ecb11ff7e332daf7b92d23e183511b67444c | c76ce5611d8abd8dfcdea24051cbdfe705a98ffd | refs/heads/master | 2021-01-19T11:35:50.624237 | 2017-04-28T04:13:13 | 2017-04-28T04:13:13 | 87,978,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | """multipleApps URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^', include('apps.loginregistration.urls')),
url(r'^courses/users_courses/', include('apps.courses_users.urls')),
url(r'^courses/', include('apps.courses.urls')),
]
| [
"[email protected]"
] |