**Schema** (32 columns, one row per source file; ⌀ marks columns that can be null):

| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 2–1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–245 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 4–245 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 4–245 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 2–1.02M |
| avg_line_length | float64 | 1–958k |
| max_line_length | int64 | 1–987k |
| alphanum_fraction | float64 | 0–1 |
| content_no_comment | string | lengths 0–1.01M |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
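A minimal sketch of loading and inspecting rows with this schema, assuming the Hugging Face `datasets` library and a local Parquet export of the table (the dataset's published name is not given in this dump, so the file path is a placeholder):

```python
from datasets import load_dataset

# "data.parquet" is a hypothetical local export of the table shown below;
# the real dataset name/path is not stated in this dump.
ds = load_dataset("parquet", data_files="data.parquet", split="train")

# Each row pairs a source file's text with repo-level metadata.
row = ds[0]
print(row["hexsha"], row["size"], row["ext"], row["lang"])
print(row["max_stars_repo_name"], row["max_stars_count"])

# Example filter over the numeric columns described above: keep short,
# mostly-alphanumeric files.
small = ds.filter(lambda r: r["size"] < 1000 and r["alphanum_fraction"] > 0.6)
print(len(small))
```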
**Row 1** · **hexsha** `f701484ff1010bdc4f884896a66c7ea8a865748d` · **size** 601 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | pywick/modules/stn.py | achaiah/pywick | 9d663faf0c1660a9b8359a6472c164f658dfc8cb | ["MIT"] | 408 | 2019-05-16T16:12:41.000Z | 2022-03-26T17:27:12.000Z |
| max_issues | pywick/modules/stn.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | ["MIT"] | 13 | 2019-05-17T05:47:06.000Z | 2021-06-21T19:02:30.000Z |
| max_forks | pywick/modules/stn.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | ["MIT"] | 42 | 2019-05-16T19:57:12.000Z | 2022-03-06T15:23:18.000Z |

**content** (avg_line_length 20.724138 · max_line_length 58 · alphanum_fraction 0.643927):

```python
import torch.nn as nn

from ..functions import F_affine2d, F_affine3d


class STN2d(nn.Module):

    def __init__(self, local_net):
        super(STN2d, self).__init__()
        self.local_net = local_net

    def forward(self, x):
        params = self.local_net(x)
        x_transformed = F_affine2d(x[0], params.view(2,3))
        return x_transformed


class STN3d(nn.Module):

    def __init__(self, local_net):
        self.local_net = local_net

    def forward(self, x):
        params = self.local_net(x)
        x_transformed = F_affine3d(x, params.view(3,4))
        return x_transformed
```

**content_no_comment:** identical to `content` · **is_comment_constant_removed** true · **is_sharp_comment_removed** true
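The three derived columns can be recomputed from `content`. A minimal sketch, assuming `avg_line_length` is the mean character count per line, `max_line_length` the longest line, and `alphanum_fraction` the alphanumeric share of all characters (these definitions are inferred from the column names, not stated in the dump):

```python
def file_stats(content):
    """Recompute the derived columns for one row (inferred definitions)."""
    lines = content.splitlines()
    avg_line_length = sum(len(line) for line in lines) / float(len(lines))
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / float(len(content))
    return avg_line_length, max_line_length, alphanum_fraction
```

For the row above, values near 20.724138, 58, and 0.643927 would support the inferred definitions.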
---

**Row 2** · **hexsha** `f701488543d3d264fbb47120a1375c3f6be920eb` · **size** 243 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | linkograph/runTests.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | ["Apache-2.0"] | 6 | 2017-07-18T15:28:33.000Z | 2020-03-03T14:45:45.000Z |
| max_issues | linkograph/runTests.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | ["Apache-2.0"] | null | null | null |
| max_forks | linkograph/runTests.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | ["Apache-2.0"] | 3 | 2017-09-09T00:36:48.000Z | 2020-03-03T14:45:49.000Z |

**content** (avg_line_length 20.25 · max_line_length 51 · alphanum_fraction 0.695473):

```python
#!/usr/bin/env python3

"""Set up file for running tests."""

import unittest


def test():
    loader = unittest.TestLoader()
    testSuite = loader.discover('linkograph.tests')
    runner = unittest.TextTestRunner()
    runner.run(testSuite)
```

**content_no_comment:**

```python
import unittest


def test():
    loader = unittest.TestLoader()
    testSuite = loader.discover('linkograph.tests')
    runner = unittest.TextTestRunner()
    runner.run(testSuite)
```

**is_comment_constant_removed** true · **is_sharp_comment_removed** true
---

**Row 3** · **hexsha** `f70149861134d92cda0cf61e5ac81b2b7020e4b1` · **size** 315 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | frazzl/services/sandbox_app2/sandbox_app2/app.py | jimtheplant/qraphql-booster | cc905310ca19c32e8c555c54069a0ac5b127d505 | ["Apache-2.0"] | 2 | 2019-11-20T16:18:53.000Z | 2020-05-25T11:00:58.000Z |
| max_issues | frazzl/services/sandbox_app2/sandbox_app2/app.py | jimtheplant/qraphql-booster | cc905310ca19c32e8c555c54069a0ac5b127d505 | ["Apache-2.0"] | null | null | null |
| max_forks | frazzl/services/sandbox_app2/sandbox_app2/app.py | jimtheplant/qraphql-booster | cc905310ca19c32e8c555c54069a0ac5b127d505 | ["Apache-2.0"] | null | null | null |

**content** (avg_line_length 15.75 · max_line_length 52 · alphanum_fraction 0.714286):

```python
from frazzl import Service
from ariadne import QueryType

schema = """
type Query {
    getTest2: Test2
}

type Test2 {
    test1: String
}
"""

query = QueryType()


def resolve_getTest2(*args, **kwargs):
    return


query.set_field("getTest2", resolve_getTest2)
testService = Service("testService2", schema, query)
```

**content_no_comment:** identical to `content` · **is_comment_constant_removed** true · **is_sharp_comment_removed** true
---

**Row 4** · **hexsha** `f7014a6bc748d2a3d40bca567ad4da305840869e` · **size** 5,520 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/virtual_machine_scale_set_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | null | null | null |
| max_issues | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/virtual_machine_scale_set_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | null | null | null |
| max_forks | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/virtual_machine_scale_set_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | null | null | null |

**content** (avg_line_length 47.179487 · max_line_length 294 · alphanum_fraction 0.666304):

```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource_py3 import Resource


class VirtualMachineScaleSet(Resource):
    """Describes a Virtual Machine Scale Set.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id
    :vartype id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :param location: Required. Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict[str, str]
    :param sku: The virtual machine scale set sku.
    :type sku: ~azure.mgmt.compute.v2018_06_01.models.Sku
    :param plan: Specifies information about the marketplace image used to
     create the virtual machine. This element is only used for marketplace
     images. Before you can use a marketplace image from an API, you must
     enable the image for programmatic use. In the Azure portal, find the
     marketplace image that you want to use and then click **Want to deploy
     programmatically, Get Started ->**. Enter any required information and
     then click **Save**.
    :type plan: ~azure.mgmt.compute.v2018_06_01.models.Plan
    :param upgrade_policy: The upgrade policy.
    :type upgrade_policy: ~azure.mgmt.compute.v2018_06_01.models.UpgradePolicy
    :param virtual_machine_profile: The virtual machine profile.
    :type virtual_machine_profile:
     ~azure.mgmt.compute.v2018_06_01.models.VirtualMachineScaleSetVMProfile
    :ivar provisioning_state: The provisioning state, which only appears in
     the response.
    :vartype provisioning_state: str
    :param overprovision: Specifies whether the Virtual Machine Scale Set
     should be overprovisioned.
    :type overprovision: bool
    :ivar unique_id: Specifies the ID which uniquely identifies a Virtual
     Machine Scale Set.
    :vartype unique_id: str
    :param single_placement_group: When true this limits the scale set to a
     single placement group, of max size 100 virtual machines.
    :type single_placement_group: bool
    :param zone_balance: Whether to force strictly even Virtual Machine
     distribution cross x-zones in case there is zone outage.
    :type zone_balance: bool
    :param platform_fault_domain_count: Fault Domain count for each placement
     group.
    :type platform_fault_domain_count: int
    :param identity: The identity of the virtual machine scale set, if
     configured.
    :type identity:
     ~azure.mgmt.compute.v2018_06_01.models.VirtualMachineScaleSetIdentity
    :param zones: The virtual machine scale set zones.
    :type zones: list[str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'unique_id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetVMProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'zone_balance': {'key': 'properties.zoneBalance', 'type': 'bool'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
    }

    def __init__(self, *, location: str, tags=None, sku=None, plan=None, upgrade_policy=None, virtual_machine_profile=None, overprovision: bool=None, single_placement_group: bool=None, zone_balance: bool=None, platform_fault_domain_count: int=None, identity=None, zones=None, **kwargs) -> None:
        super(VirtualMachineScaleSet, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.upgrade_policy = upgrade_policy
        self.virtual_machine_profile = virtual_machine_profile
        self.provisioning_state = None
        self.overprovision = overprovision
        self.unique_id = None
        self.single_placement_group = single_placement_group
        self.zone_balance = zone_balance
        self.platform_fault_domain_count = platform_fault_domain_count
        self.identity = identity
        self.zones = zones
```

**content_no_comment:**

```python
from .resource_py3 import Resource


class VirtualMachineScaleSet(Resource):

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'unique_id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'upgrade_policy': {'key': 'properties.upgradePolicy', 'type': 'UpgradePolicy'},
        'virtual_machine_profile': {'key': 'properties.virtualMachineProfile', 'type': 'VirtualMachineScaleSetVMProfile'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'overprovision': {'key': 'properties.overprovision', 'type': 'bool'},
        'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
        'single_placement_group': {'key': 'properties.singlePlacementGroup', 'type': 'bool'},
        'zone_balance': {'key': 'properties.zoneBalance', 'type': 'bool'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'identity': {'key': 'identity', 'type': 'VirtualMachineScaleSetIdentity'},
        'zones': {'key': 'zones', 'type': '[str]'},
    }

    def __init__(self, *, location: str, tags=None, sku=None, plan=None, upgrade_policy=None, virtual_machine_profile=None, overprovision: bool=None, single_placement_group: bool=None, zone_balance: bool=None, platform_fault_domain_count: int=None, identity=None, zones=None, **kwargs) -> None:
        super(VirtualMachineScaleSet, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        self.plan = plan
        self.upgrade_policy = upgrade_policy
        self.virtual_machine_profile = virtual_machine_profile
        self.provisioning_state = None
        self.overprovision = overprovision
        self.unique_id = None
        self.single_placement_group = single_placement_group
        self.zone_balance = zone_balance
        self.platform_fault_domain_count = platform_fault_domain_count
        self.identity = identity
        self.zones = zones
```

**is_comment_constant_removed** true · **is_sharp_comment_removed** true
---

**Row 5** · **hexsha** `f7014b607675c6e0abeb3819fa28752a7938ea79` · **size** 836 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | setup.py | arthurzam/pkgdev | cd0890d04544f913802210c94e03c79bedd8336a | ["BSD-3-Clause"] | 8 | 2021-02-27T11:14:12.000Z | 2022-03-24T14:56:38.000Z |
| max_issues | setup.py | arthurzam/pkgdev | cd0890d04544f913802210c94e03c79bedd8336a | ["BSD-3-Clause"] | 55 | 2021-02-27T11:16:43.000Z | 2022-03-29T12:19:30.000Z |
| max_forks | setup.py | arthurzam/pkgdev | cd0890d04544f913802210c94e03c79bedd8336a | ["BSD-3-Clause"] | 4 | 2021-03-05T22:34:45.000Z | 2022-03-21T21:36:09.000Z |

**content** (avg_line_length 28.827586 · max_line_length 85 · alphanum_fraction 0.673445):

```python
#!/usr/bin/env python3

from itertools import chain

from setuptools import setup

from snakeoil.dist import distutils_extensions as pkgdist

pkgdist_setup, pkgdist_cmds = pkgdist.setup()

setup(**dict(
    pkgdist_setup,
    license='BSD',
    author='Tim Harder',
    author_email='[email protected]',
    description='collection of tools for Gentoo development',
    url='https://github.com/pkgcore/pkgdev',
    data_files=list(chain(
        pkgdist.data_mapping('share/bash-completion/completions', 'completion/bash'),
        pkgdist.data_mapping('share/zsh/site-functions', 'completion/zsh'),
    )),
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
))
```

**content_no_comment:**

```python
from itertools import chain

from setuptools import setup

from snakeoil.dist import distutils_extensions as pkgdist

pkgdist_setup, pkgdist_cmds = pkgdist.setup()

setup(**dict(
    pkgdist_setup,
    license='BSD',
    author='Tim Harder',
    author_email='[email protected]',
    description='collection of tools for Gentoo development',
    url='https://github.com/pkgcore/pkgdev',
    data_files=list(chain(
        pkgdist.data_mapping('share/bash-completion/completions', 'completion/bash'),
        pkgdist.data_mapping('share/zsh/site-functions', 'completion/zsh'),
    )),
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
))
```

**is_comment_constant_removed** true · **is_sharp_comment_removed** true
---

**Row 6** · **hexsha** `f7014cf013e416c1756bd9afde14cf6f88db909a` · **size** 8,039 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | src/settings.py | shaniaki/f2dot | 7a2c1e47e884cc699a111ed7bf8711cea3e86ee6 | ["BSD-3-Clause"] | null | null | null |
| max_issues | src/settings.py | shaniaki/f2dot | 7a2c1e47e884cc699a111ed7bf8711cea3e86ee6 | ["BSD-3-Clause"] | null | null | null |
| max_forks | src/settings.py | shaniaki/f2dot | 7a2c1e47e884cc699a111ed7bf8711cea3e86ee6 | ["BSD-3-Clause"] | null | null | null |

**content** (avg_line_length 37.919811 · max_line_length 121 · alphanum_fraction 0.69735):

```python
'''
* File: settings.py
* Author: George Ungureanu <[email protected]>
* Purpose: This file contains methods for collecting configuration options
and initialize the settings object which holds the parameters
throughout the program execution.
* License: BSD3
'''

'''
Copyright (c) 2014, George Ungureanu
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''

import __init__
import os
import re
import utils
import logging


## Model class for storing configuration parameters
#
# This class is a container for the configuration settins and
# provides methods to gather or parse from two main sources: the
# configuration file and the comman-line arguments
class Settings:

    ## Class constructor
    # @param Settings $self
    #        The object pointer
    # @param ArgumentParser $args
    #        The comman-line arguments
    def __init__(self, args):
        self.logger = logging.getLogger('f2dot.settings')
        self.logger.debug('Configuring the runtime execution...')
        self.runPath = os.path.dirname(os.path.abspath(__file__))
        self.configFileName = args.mode + '.conf'
        # if -g option chosen
        if args.generate_config:
            path = args.output
            if not path:
                path = os.getcwd()
            self.createConfFile(path, force=True)
            self.logger.info('Generated config file in ' + path)
            os._exit(1)
        # set paths & names
        self.inPathAndFile = os.path.abspath(args.input)
        self.inPath, self.inFile = os.path.split(self.inPathAndFile)
        if args.output:
            self.outPath = os.path.abspath(args.output)
        else:
            self.outPath = self.inPath
        # resolve config file
        if args.config:
            self.confFile = os.path.abspath(args.config)
        else:
            self.confFile = self.createConfFile(self.inPath, force=False)
        self.logger.info("Using the configuration in %s", self.confFile)
        for line in open(self.confFile):
            if line.strip().startswith("# works with : f2dot"):
                confVer = line.strip().split("# works with : f2dot-",1)[1]
                if not confVer == __init__.__version__:
                    self.logger.warn('The config file was created by another version '
                                     + 'of the tool. Errors may occur.')
        self.settingDict = {}
        self.constraintDict = {}
        # loading default settings & constraints
        for line in utils.getConfigInSection(os.path.join(self.runPath,'config','general.conf'), '[default settings]'):
            tag, value = utils.strBeforeAfter(line,"=")
            self.settingDict[tag] = value
        for line in utils.getConfigInSection(os.path.join(self.runPath,'config',self.configFileName), '[default settings]'):
            tag, value = utils.strBeforeAfter(line,"=")
            self.settingDict[tag] = value
        for line in utils.getConfigInSection(os.path.join(self.runPath,'config','general.conf'), '[setting constraints]'):
            tag, value = utils.strBeforeAfter(line,"=")
            self.constraintDict[tag] = value
        for line in utils.getConfigInSection(os.path.join(self.runPath,'config',self.configFileName), '[setting constraints]'):
            tag, value = utils.strBeforeAfter(line,"=")
            self.constraintDict[tag] = value
        # loading custom settings and comparing them against the constraints
        for line in utils.getConfigInSection(self.confFile):
            tag, value = utils.strBeforeAfter(line,"=")
            if tag in self.constraintDict:
                if self.constraintDict[tag]:
                    pattern=re.compile(self.constraintDict[tag])
                    if not pattern.match(value):
                        self.logger.warn("The value for %s (%s) does not match pattern %s. Choosing the default value: %s",
                                         tag, value, self.constraintDict[tag], self.settingDict[tag])
                        continue
            self.settingDict[tag] = value
        if args.format:
            self.settingDict['FORMAT'] = args.format
        if args.prog:
            self.settingDict['PROG'] = args.prog
        self.outPathAndFile = os.path.join(self.outPath, utils.getFileName(self.inFile) + '.' + self.settingDict['FORMAT'])
        self.logger.debug('Runtime configuration successful')

    ## Creates a config file in the specified path.
    # @param str $path
    #        The directory where the configuration file should be
    # @param bool $force
    #        \cTrue to overwrite existing configuration file
    # @return A string with the absolute path to the config file
    def createConfFile(self, path, force=False):
        confFile=os.path.join(path, self.configFileName)
        if (os.path.isfile(confFile)) and not force:
            return confFile
        with open(confFile,'w') as f:
            header = '' +\
                '# file : ' + self.configFileName + ' \n' +\
                '# description : automatically generated configuration file\n' +\
                '# usage : change the right-hand values as suggested \n' +\
                '# works with : f2dot-' + __init__.__version__ + '\n' +\
                '# ####################################################################\n'
            f.write(header)
        utils.copySection(os.path.join(self.runPath,'config','general.conf'), confFile, '[default settings]')
        utils.copySection(os.path.join(self.runPath,'config',self.configFileName), confFile, '[default settings]')
        return confFile

    ## Method to enable treating a Settings object as a dictionary.
    # @param str $key
    #        the setting name, as defined in the .conf file
    # @return The value of the config parameter with the name 'key'
    def __getitem__(self, key):
        return self.settingDict[key]

    ## Prints the current settings
    # @param Settings $self The object pointer
    def printSettings(self):
        msg = 'The current settings are:\n' \
            + '\t* runPath : ' + self.runPath + '\n' \
            + '\t* inPathAndFile : ' + self.inPathAndFile + '\n' \
            + '\t* inPath : ' + self.inPath + '\n' \
            + '\t* inFile : ' + self.inFile + '\n' \
            + '\t* outPath : ' + self.outPath + '\n' \
            + '\t* outPathAndFile : ' + self.outPathAndFile + '\n' \
            + '\t* confFileName : ' + self.outPathAndFile + '\n' \
            + '\t* confFile : ' + self.configFileName + '\n'
        for key, value in self.settingDict.iteritems():
            msg = msg + '\t* ' + key + " : " + value + '\n'
        return msg

    ## @var logger
    #  Logger (logging object)

    ## @var runPath
    #  The path where the runnable is located (str)

    ## @var inPathAndFile
    #  The full path to the input file (str)

    ## @var inFile
    #  Input file name (str)

    ## @var outPath
    #  Absolute path to the output directory (str)

    ## @var configFileName
    #  Name of the configuration file based on the parse mode (str)

    ## @var confFile
    #  Absolte path to the configuration file (str)

    ## @var outPathAndFile
    #  Absolute path to the output file (str)

    ## @var settingDict
    #  Dictionary containing all other settings (dict)

    ## @var constraintDict
    #  Dictionary containing lists with allowed values for the same keys in settingDict
```

**content_no_comment:**

```python
import __init__
import os
import re
import utils
import logging


class Settings:

    def __init__(self, args):
        self.logger = logging.getLogger('f2dot.settings')
        self.logger.debug('Configuring the runtime execution...')
        self.runPath = os.path.dirname(os.path.abspath(__file__))
        self.configFileName = args.mode + '.conf'
        if args.generate_config:
            path = args.output
            if not path:
                path = os.getcwd()
            self.createConfFile(path, force=True)
            self.logger.info('Generated config file in ' + path)
            os._exit(1)
        self.inPathAndFile = os.path.abspath(args.input)
        self.inPath, self.inFile = os.path.split(self.inPathAndFile)
        if args.output:
            self.outPath = os.path.abspath(args.output)
        else:
            self.outPath = self.inPath
        if args.config:
            self.confFile = os.path.abspath(args.config)
        else:
            self.confFile = self.createConfFile(self.inPath, force=False)
        self.logger.info("Using the configuration in %s", self.confFile)
        for line in open(self.confFile):
            if line.strip().startswith("# works with : f2dot"):
                confVer = line.strip().split("# works with : f2dot-",1)[1]
                if not confVer == __init__.__version__:
                    self.logger.warn('The config file was created by another version '
                                     + 'of the tool. Errors may occur.')
        self.settingDict = {}
        self.constraintDict = {}
        for line in utils.getConfigInSection(os.path.join(self.runPath,'config','general.conf'), '[default settings]'):
            tag, value = utils.strBeforeAfter(line,"=")
            self.settingDict[tag] = value
        for line in utils.getConfigInSection(os.path.join(self.runPath,'config',self.configFileName), '[default settings]'):
            tag, value = utils.strBeforeAfter(line,"=")
            self.settingDict[tag] = value
        for line in utils.getConfigInSection(os.path.join(self.runPath,'config','general.conf'), '[setting constraints]'):
            tag, value = utils.strBeforeAfter(line,"=")
            self.constraintDict[tag] = value
        for line in utils.getConfigInSection(os.path.join(self.runPath,'config',self.configFileName), '[setting constraints]'):
            tag, value = utils.strBeforeAfter(line,"=")
            self.constraintDict[tag] = value
        for line in utils.getConfigInSection(self.confFile):
            tag, value = utils.strBeforeAfter(line,"=")
            if tag in self.constraintDict:
                if self.constraintDict[tag]:
                    pattern=re.compile(self.constraintDict[tag])
                    if not pattern.match(value):
                        self.logger.warn("The value for %s (%s) does not match pattern %s. Choosing the default value: %s",
                                         tag, value, self.constraintDict[tag], self.settingDict[tag])
                        continue
            self.settingDict[tag] = value
        if args.format:
            self.settingDict['FORMAT'] = args.format
        if args.prog:
            self.settingDict['PROG'] = args.prog
        self.outPathAndFile = os.path.join(self.outPath, utils.getFileName(self.inFile) + '.' + self.settingDict['FORMAT'])
        self.logger.debug('Runtime configuration successful')

    def createConfFile(self, path, force=False):
        confFile=os.path.join(path, self.configFileName)
        if (os.path.isfile(confFile)) and not force:
            return confFile
        with open(confFile,'w') as f:
            header = '' +\
                '# file : ' + self.configFileName + ' \n' +\
                '# description : automatically generated configuration file\n' +\
                '# usage : change the right-hand values as suggested \n' +\
                '# works with : f2dot-' + __init__.__version__ + '\n' +\
                '# ####################################################################\n'
            f.write(header)
        utils.copySection(os.path.join(self.runPath,'config','general.conf'), confFile, '[default settings]')
        utils.copySection(os.path.join(self.runPath,'config',self.configFileName), confFile, '[default settings]')
        return confFile

    def __getitem__(self, key):
        return self.settingDict[key]

    def printSettings(self):
        msg = 'The current settings are:\n' \
            + '\t* runPath : ' + self.runPath + '\n' \
            + '\t* inPathAndFile : ' + self.inPathAndFile + '\n' \
            + '\t* inPath : ' + self.inPath + '\n' \
            + '\t* inFile : ' + self.inFile + '\n' \
            + '\t* outPath : ' + self.outPath + '\n' \
            + '\t* outPathAndFile : ' + self.outPathAndFile + '\n' \
            + '\t* confFileName : ' + self.outPathAndFile + '\n' \
            + '\t* confFile : ' + self.configFileName + '\n'
        for key, value in self.settingDict.iteritems():
            msg = msg + '\t* ' + key + " : " + value + '\n'
        return msg
```

**is_comment_constant_removed** true · **is_sharp_comment_removed** true
---

**Row 7** · **hexsha** `f7014f0ab9504f09ce22fbb6c71a0bd07efbbf8c` · **size** 4,095 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | flaskish.py | baverman/telenot | 5b6e3a0ffc78b3a1eef2bb0ebf90244fb2b1ce1e | ["MIT"] | null | null | null |
| max_issues | flaskish.py | baverman/telenot | 5b6e3a0ffc78b3a1eef2bb0ebf90244fb2b1ce1e | ["MIT"] | null | null | null |
| max_forks | flaskish.py | baverman/telenot | 5b6e3a0ffc78b3a1eef2bb0ebf90244fb2b1ce1e | ["MIT"] | 1 | 2020-09-21T14:22:10.000Z | 2020-09-21T14:22:10.000Z |

**content** (avg_line_length 30.559701 · max_line_length 88 · alphanum_fraction 0.588767):

```python
from __future__ import print_function
from functools import wraps
import logging

try:
    import ujson as json
except ImportError:
    import json

from flask import Flask as _Flask
from flask.globals import _request_ctx_stack
from werkzeug.wrappers import Response
from werkzeug.datastructures import Headers
from werkzeug.exceptions import HTTPException

_Request = _Flask.request_class


class cached_property(object):
    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        value = obj.__dict__[self.func.__name__] = self.func(obj)
        return value


class ApiError(Exception):
    status_code = 500
    error = 'internal-error'

    def __init__(self, error=None, status_code=None, **kwargs):
        self.status_code = status_code or self.status_code
        self.error = error or self.error
        self.details = kwargs

    def to_json(self):
        data = {'error': self.error}
        self.details and data.update(self.details)
        return data


class Request(_Request):
    def __init__(self, *args, **kwargs):
        _Request.__init__(self, *args, **kwargs)
        self._response = None

    @cached_property
    def response(self):
        self._response = HeaderResponse()
        return self._response

    def process_response(self, response):
        headers = self._response and self._response.headers
        if headers:
            response.headers._list.extend(headers)
        return response


class HeaderResponse(Response):
    def __init__(self):
        self.headers = Headers()


class Flask(_Flask):
    request_class = Request

    def __init__(self, *args, **kwargs):
        _Flask.__init__(self, *args, **kwargs)
        self.url_map.strict_slashes = False
        self.endpoint_counter = 0
        self._logger = logging.getLogger(self.logger_name)

    def route(self, rule, endpoint=None, weight=None, **options):
        if weight is not None:
            weight = False, -9999, weight

        def decorator(func):
            lendpoint = endpoint
            if not lendpoint:
                lendpoint = '{}_{}'.format(func.__name__, self.endpoint_counter)
                self.endpoint_counter += 1
            self.add_url_rule(rule, lendpoint, func, **options)
            if weight:
                self.url_map._rules[-1].match_compare_key = lambda: weight
            return func
        return decorator

    def api(self, *args, **kwargs):
        def decorator(func):
            @wraps(func)
            def inner(*args, **kwargs):
                try:
                    result = func(*args, **kwargs)
                except ApiError as e:
                    result = e
                except HTTPException as e:
                    result = e
                except Exception:
                    self.logger.exception('Unhandled error')
                    result = ApiError()

                if isinstance(result, Response):
                    return result
                elif isinstance(result, ApiError):
                    code = result.status_code
                    result = result.to_json()
                else:
                    code = 200
                return self.response_class(json.dumps(result, ensure_ascii=False), code,
                                           content_type='application/json')
            return self.route(*args, **kwargs)(inner)
        return decorator

    def process_response(self, response):
        response = _request_ctx_stack.top.request.process_response(response)
        return _Flask.process_response(self, response)

    def print_routes(self, sort=False):
        rules = self.url_map.iter_rules()
        if sort:
            rules = sorted(rules, key=lambda r: r.rule)
        for rule in rules:
            func = self.view_functions[rule.endpoint]
            print('{:10} {}\t{}.{}'.format(
                ','.join(rule.methods),
                rule.rule,
                func.__module__,
                func.__name__))
```

**content_no_comment:** identical to `content` · **is_comment_constant_removed** true · **is_sharp_comment_removed** true
---

**Row 8** · **hexsha** `f7014fea1b031c06902ccaa5d7500866eec73713` · **size** 14,152 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | pp-cdmx/public_account/public_account_mixins/cleaner_mix.py | rickrebel/race-history | be93b88cf4658fd2c5ec409d8f422b2960d1ae60 | ["MIT"] | 1 | 2020-08-31T21:08:54.000Z | 2020-08-31T21:08:54.000Z |
| max_issues | pp-cdmx/public_account/public_account_mixins/cleaner_mix.py | rickrebel/race-history | be93b88cf4658fd2c5ec409d8f422b2960d1ae60 | ["MIT"] | 1 | 2021-06-10T23:09:45.000Z | 2021-06-10T23:09:45.000Z |
| max_forks | pp-cdmx/public_account/public_account_mixins/cleaner_mix.py | rickrebel/race-history | be93b88cf4658fd2c5ec409d8f422b2960d1ae60 | ["MIT"] | 1 | 2021-07-16T19:32:04.000Z | 2021-07-16T19:32:04.000Z |

**content** (avg_line_length 38.772603 · max_line_length 79 · alphanum_fraction 0.577162; Python 2 source, Spanish comments and docstrings translated to English, string literals kept as-is):

```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re

from scripts.data_cleaner import set_new_error
#from scripts.data_cleaner_v2 import calculateNumber


class PublicAccountCleanerMix:

    # cleaning
    def column_formatter_v3(self, reset=False, image_num=None):
        from public_account.models import PPImage, Row
        print
        print
        print "----Cuenta publica %s, id: %s----" % (self, self.id)
        # from scripts.data_cleaner_v3 import (
        #     get_normal_name,
        #     clean_text,
        #     calculate_special_formats_v3)
        # import numpy
        # suburbs_dict = []
        all_images = PPImage.objects.filter(public_account=self)
        if image_num:
            all_images = all_images.filter(path__icontains=image_num)
        if reset:
            self.reset(all_images)
        all_images = all_images.order_by("path")
        # The formats of each of the images are obtained
        special_formats = self.calculate_special_formats_v3(
            all_images, column_types[3:], image_num)
        """
        Once the special_formats values have been obtained, the data is processed:
        """
        if not special_formats:
            # if the special_formats computation brings no information, the
            # whole process crashes; until it is fixed no images will be
            # processed
            set_new_error(
                self, "error al calcular special_formats: %s" %
                special_formats)
            set_new_error(
                self, "No se pocreso ningun imagen.table_data")
            all_images = []
        for image in all_images:
            print u" %s" % image
            # We try to obtain the data we are interested in
            # print image.path
            # For each row of data:
            all_rows = Row.objects.filter(image=image)
            if not all_rows.count():
                set_new_error(
                    self, "La imagen %s no proceso Table Data" % image)
            for row in all_rows:
                errors = row.get_errors()
                # We try to obtain the suburb id in a simple way.
                vision_data = row.get_vision_data()
                row_data = []
                valid_row = None
                for idx, col in enumerate(vision_data):
                    if idx > 2:
                        col_ref = column_types[idx]
                        special_format = special_formats[idx - 3]
                        final_value, c_errors = calculateNumber(
                            col, col_ref, special_format)
                        if len(c_errors):
                            errors += c_errors
                            final_value = None
                    elif idx:
                        final_value = clean_text(col)
                    else:
                        final_value = get_normal_name(col)
                        valid_row = final_value
                    row_data.append(final_value)
                    if (final_value not in [None, ""] and
                            idx and final_value is not False):
                        setattr(row, column_types[idx]["field"], final_value)
                # print vision_data[0]
                # print row_data
                if valid_row is False:
                    continue
                row.formatted_data = json.dumps(row_data)
                if not valid_row:
                    errors.append("Fila sin información de colonia")
                # else:
                #     row.sequential = seq
                row.errors = json.dumps(errors)
                # row = set_values_row(row)
                row.save()
        # return

    def calculate_special_formats_v3(
            self, all_images, columns_nums, image_num):
        variables = self.get_variables()
        # if special_formats had already been computed, it is simply fetched
        # if "special_formats" in variables:
        #     return variables["special_formats"]
        # if not, it is computed:
        # Only the numeric columns will be used, which are the last 5
        count_rows = [0, 0, 0, 0, 0]
        special_format_count = [0, 0, 0, 0, 0]
        special_formats = [False, False, False, False, False]
        for image in all_images[:3]:
            for row in image.get_table_data():
                # Only the last 5 columns will be used
                for idx, value in enumerate(row[3:]):
                    sum_col = calculateNumber(value, columns_nums[idx])
                    # They are only added if the function returned some number
                    if sum_col is not None:
                        special_format_count[idx] += sum_col
                        count_rows[idx] += 1
        # A tendency toward some special format can be determined
        # if there are at least 5 values with a valid format
        for idx, col in enumerate(columns_nums):
            curr_tot = float(count_rows[idx])
            is_special = special_format_count[
                idx] / curr_tot >= 0.75 if curr_tot else False
            special_formats.append(is_special)
        variables["special_formats"] = special_formats
        self.variables = json.dumps(variables)
        self.save()
        return special_formats


column_types = [
    {
        "name": "suburb",
        "title": u"Colonia",
        "type": "fk"
    },
    {
        "name": "project",
        "title": u"Proyecto",
        "field": "project_name",
        "type": "text"
    },
    {
        "name": "description",
        "title": u"Descripción",
        "field": "description",
        "type": "text"
    },
    {
        "name": "progress",
        "title": u"Avance",
        "field": "progress",
        "type": "number",
        "idx": 3
    },
    {
        "name": "approved",
        "title": u"Aprobado",
        "field": "approved",
        "type": "ammount",
        "idx": 4
    },
    {
        "name": "modified",
        "title": u"Modificado",
        "field": "modified",
        "type": "ammount",
        "idx": 5
    },
    {
        "name": "executed",
        "title": u"Ejecutado",
        "field": "executed",
        "type": "ammount",
        "idx": 6
    },
    {
        "name": "variation",
        "title": u"Variación",
        "field": "variation",
        "type": "number",
        "idx": 7
    },
]


# This function standardizes and cleans names to make it easier to find
# similarities between the official names and the ones the city halls
# use
def cleanSuburbName(text):
    import unidecode
    # First we standardize Spanish characters such as accents or eñes
    try:
        final_name = unidecode.unidecode(text).upper()
    except Exception as e:
        print e
        final_name = text.upper()
    # We replace the "OR" sign (|) with a latin I
    final_name = re.sub(r'(\|)', 'I', final_name)
    # We will keep numbers, letters, spaces and very few other characters:
    # underscores and slashes, since some suburbs are distinguished by
    # them. Everything else is simply removed:
    final_name = re.sub(ur'[^0-9A-Z\s\(\)\_\-\/\º\.\,]', '', final_name)
    final_name = final_name.upper()
    # We replace periods, commas and hyphens with spaces; they are added
    # without any pattern and only help lower the match level.
    final_name = re.sub(r'[\.\,\-]', ' ', final_name)
    # remove double spaces
    final_name = re.sub(r' +', ' ', final_name)
    # remove spaces inside parentheses, e.g.: ( U HAB ) --> (U HAB)
    final_name = re.sub(r'\(\s?', r'(', final_name)
    final_name = re.sub(r'\s?\)', r')', final_name)
    # Some common replacements of naming alternatives:
    re_uhab = re.compile(
        r'\(\s?(CONJ HAB|UNIDAD HABITACIONAL|U HABS'
        r'|CONJUNTO HABITACIONAL)\s?\)')
    final_name = re.sub(re_uhab, r'(U HAB)', final_name)
    final_name = re.sub(r'\(\s?(FRACCIONAMIENTO)\s?\)', '(FRACC)', final_name)
    final_name = re.sub(ur'\(\s?(AMPLIACION|AMPLIACIÓN)\s?\)',
                        '(AMPL)', final_name)
    final_name = re.sub(ur'^(COMITE|COMITÉ|COLONIA)\s', '', final_name)
    # We remove the key from the normal_name (analyzed separately)
    # The suburb keys have the format DD-AAA where DD is the two-digit key
    # of the city hall and AAA is the three-digit key of the suburb.
    # The OCR may translate the divider as any non-numeric character.
    re_cve = re.compile(r'(\(?\d{2})\s?\D?\s?(\d{3}\)?)')
    final_name = re.sub(re_cve, '', final_name)
    # strip spaces at the start and end
    final_name = final_name.strip()
    # remove double spaces, again
    final_name = re.sub(r' +', ' ', final_name)
    return final_name


def get_normal_name(text):
    normal_name = cleanSuburbName(text)
    raw_non_spaces = len(re.sub(r'[^\w]', '', normal_name))
    # Validators to exclude words we already know and that we know are
    # part of the headers or footers and contain no data.
    if bool(re.search(
            r'(COLONIA O PUEBLO|ORIGINARIO|UNIDAD RESPON)', normal_name)):
        return False
    # This means we reached the end; these are words that are not in any
    # suburb and are always in the footer.
    elif bool(re.search(r'(REFIERE|REMANENTE|TOTAL'
                        r'|AUTORI|ELABORO|LABORADO|DIRECTOR)', normal_name)):
        return False
    # No suburb has such a short name, so we return None to flag that by
    # itself it cannot be a suburb; this is the case when letters from
    # another column get swallowed
    elif raw_non_spaces < 4:
        return None
    return normal_name


def clean_text(text):
    # final_text = unidecode.unidecode(text)
    final_text = text
    final_text = final_text.strip()
    final_text = re.sub(r' +', ' ', final_text)
    final_text = re.sub(r'(\(|\¿|\¡)\s?', '\\1', final_text)
    final_text = re.sub(r'\s?(\)|\:|\,|\.|\;|\?|\!)', '\\1', final_text)
    final_text = re.sub(r'(\)|\:|\,|\.|\;|\?|\!)(\w)', '\\1 \\2', final_text)
    final_text = final_text.strip()
    return final_text


def calculateNumber(text, column, has_special_format=None):
    errors = []
    is_ammount = column["type"] == "ammount"
    new_value = text
    # We replace Bs with 8
    new_value = re.sub(r'(B)', '8', new_value)
    # We replace Os with 0
    new_value = re.sub(r'(O)', '0', new_value)
    # We remove the parentheses that add noise with (100) and (1)
    new_value = re.sub(r'[\(\)]', '', new_value)
    # We replace Ss with 5
    new_value = re.sub(r'(s|S)', '5', new_value)
    # We replace slashes with commas
    new_value = re.sub(r'(/)', ',', new_value)
    # in case it is a printed division by 0
    has_error_excel = bool(re.search(r'D.V', new_value))
    # We keep numbers and a few letters, nothing more:
    new_value = re.sub(r'[^0-9\,\.\-\%]', '', new_value)
    # Basic whitespace cleanup:
    # new_value = new_value.strip()
    # Spaces around periods and commas are removed (always to periods)
    # 4 , 5 --> 4,5
    # new_value = re.sub(r'(\d)\s?[\.\,]\s?(\d)', '\\1.\\2', new_value)
    # Commas are replaced with periods
    new_value = re.sub(r'(\,)', '.', new_value)
    # REGEX pattern for valid numbers (amounts).
    re_ammount = re.compile(r'^\d{1,7}(\.\d{2})?$')
    # REGEX pattern for valid percentages.
    re_percent = re.compile(r'^\-?\d{1,3}(\.\d{1,2})?[4895%]?\)?$')
    re_compara = re_ammount if is_ammount else re_percent
    has_percent = re.compile(r'[4895%]$')
    has_decimals = re.compile(r'\d{2}$')
    re_format = has_decimals if is_ammount else has_percent
    # if not is_ammount:
    #     # The space between the number and the percent sign is removed,
    #     # if present.
    #     new_value = re.sub(r'(\d)\s?%', '\\1%', new_value)
    #     # Spaces after opening parentheses and before closing them are
    #     # removed
    #     new_value = re.sub(r'\(\s?(.*)(\S+)\s?\)', '(\\1\\2)', new_value)
    #     new_value = re.sub(r'\(\s?(.+)\s?\)', '\\1', new_value)
    ## else:
    if is_ammount:
        # If there are 3 characters after the periods, we remove them,
        # with the idea of leaving at most one decimal point
        new_value = re.sub(r'\.(\d{3})', '\\1', new_value)
    # We check whether it has any of the possible formats:
    correct_format = bool(re_compara.search(new_value))
    # if it is just a simple count of special formats:
    if has_special_format is None:
        if not correct_format:
            return None
        return 1 if bool(re_format.search(new_value)) else 0
    if not correct_format:
        if has_error_excel and not is_ammount:
            new_value = '0%' if has_special_format else '0'
        else:
            err = u"Formato incorrecto en columna %s" % column["title"]
            errors.append(err)
    only_ints = new_value
    # Percentage cleanup
    if (has_special_format and not is_ammount):
        only_ints = re.sub(re_format, '', only_ints)
    # We force a float so it can be processed as a number
    try:
        float_value = float(only_ints)
    except Exception as e:
        only_ints = re.sub(re_format, '', only_ints)
        try:
            float_value = float(only_ints)
        except Exception as e:
            errors.append(
                u"No se pudo converir número en columna %s"
                % column["title"])
            if only_ints:
                try:
                    print ("error al convertir en calculateNumber: \"%s\""
                           % text)
                except Exception as e:
                    pass
                print e
            return None, errors
    # Some numbers that, if we get them, mean a problem
    if (is_ammount and 0 < float_value < 1000) or float_value > 10000000:
        errors.append(u"Número inválido en columna %s" % column["title"])
    elif not is_ammount and float_value > 2:
        float_value = float_value / float(100)
    return float_value, errors
```

**content_no_comment:**

```python
from __future__ import unicode_literals
import json
import re

from scripts.data_cleaner import set_new_error


class PublicAccountCleanerMix:

    def column_formatter_v3(self, reset=False, image_num=None):
        from public_account.models import PPImage, Row
        print
        print
        print "----Cuenta publica %s, id: %s----" % (self, self.id)
        all_images = PPImage.objects.filter(public_account=self)
        if image_num:
            all_images = all_images.filter(path__icontains=image_num)
        if reset:
            self.reset(all_images)
        all_images = all_images.order_by("path")
        special_formats = self.calculate_special_formats_v3(
            all_images, column_types[3:], image_num)
        """
        Once the special_formats values have been obtained, the data is processed:
        """
        if not special_formats:
            set_new_error(
                self, "error al calcular special_formats: %s" %
                special_formats)
            set_new_error(
                self, "No se pocreso ningun imagen.table_data")
            all_images = []
        for image in all_images:
            print u" %s" % image
            all_rows = Row.objects.filter(image=image)
            if not all_rows.count():
                set_new_error(
                    self, "La imagen %s no proceso Table Data" % image)
            for row in all_rows:
                errors = row.get_errors()
                vision_data = row.get_vision_data()
                row_data = []
                valid_row = None
                for idx, col in enumerate(vision_data):
                    if idx > 2:
                        col_ref = column_types[idx]
                        special_format = special_formats[idx - 3]
                        final_value, c_errors = calculateNumber(
                            col, col_ref, special_format)
                        if len(c_errors):
                            errors += c_errors
                            final_value = None
                    elif idx:
                        final_value = clean_text(col)
                    else:
                        final_value = get_normal_name(col)
                        valid_row = final_value
                    row_data.append(final_value)
                    if (final_value not in [None, ""] and
                            idx and final_value is not False):
                        setattr(row, column_types[idx]["field"], final_value)
                if valid_row is False:
                    continue
                row.formatted_data = json.dumps(row_data)
                if not valid_row:
                    errors.append("Fila sin información de colonia")
                row.errors = json.dumps(errors)
                row.save()

    def calculate_special_formats_v3(
            self, all_images, columns_nums, image_num):
        variables = self.get_variables()
        count_rows = [0, 0, 0, 0, 0]
        special_format_count = [0, 0, 0, 0, 0]
        special_formats = [False, False, False, False, False]
        for image in all_images[:3]:
            for row in image.get_table_data():
                for idx, value in enumerate(row[3:]):
                    sum_col = calculateNumber(value, columns_nums[idx])
                    if sum_col is not None:
                        special_format_count[idx] += sum_col
                        count_rows[idx] += 1
        for idx, col in enumerate(columns_nums):
            curr_tot = float(count_rows[idx])
            is_special = special_format_count[
                idx] / curr_tot >= 0.75 if curr_tot else False
            special_formats.append(is_special)
        variables["special_formats"] = special_formats
        self.variables = json.dumps(variables)
        self.save()
        return special_formats


column_types = [
    {
        "name": "suburb",
        "title": u"Colonia",
        "type": "fk"
    },
    {
        "name": "project",
        "title": u"Proyecto",
        "field": "project_name",
        "type": "text"
    },
    {
        "name": "description",
        "title": u"Descripción",
        "field": "description",
        "type": "text"
    },
    {
        "name": "progress",
        "title": u"Avance",
        "field": "progress",
        "type": "number",
        "idx": 3
    },
    {
        "name": "approved",
        "title": u"Aprobado",
        "field": "approved",
        "type": "ammount",
        "idx": 4
    },
    {
        "name": "modified",
        "title": u"Modificado",
        "field": "modified",
        "type": "ammount",
        "idx": 5
    },
    {
        "name": "executed",
        "title": u"Ejecutado",
        "field": "executed",
        "type": "ammount",
        "idx": 6
    },
    {
        "name": "variation",
        "title": u"Variación",
        "field": "variation",
        "type": "number",
        "idx": 7
    },
]


def cleanSuburbName(text):
    import unidecode
    try:
        final_name = unidecode.unidecode(text).upper()
    except Exception as e:
        print e
        final_name = text.upper()
    final_name = re.sub(r'(\|)', 'I', final_name)
    final_name = re.sub(ur'[^0-9A-Z\s\(\)\_\-\/\º\.\,]', '', final_name)
    final_name = final_name.upper()
    final_name = re.sub(r'[\.\,\-]', ' ', final_name)
    final_name = re.sub(r' +', ' ', final_name)
    final_name = re.sub(r'\(\s?', r'(', final_name)
    final_name = re.sub(r'\s?\)', r')', final_name)
    re_uhab = re.compile(
        r'\(\s?(CONJ HAB|UNIDAD HABITACIONAL|U HABS'
        r'|CONJUNTO HABITACIONAL)\s?\)')
    final_name = re.sub(re_uhab, r'(U HAB)', final_name)
    final_name = re.sub(r'\(\s?(FRACCIONAMIENTO)\s?\)', '(FRACC)', final_name)
    final_name = re.sub(ur'\(\s?(AMPLIACION|AMPLIACIÓN)\s?\)',
                        '(AMPL)', final_name)
    final_name = re.sub(ur'^(COMITE|COMITÉ|COLONIA)\s', '', final_name)
    re_cve = re.compile(r'(\(?\d{2})\s?\D?\s?(\d{3}\)?)')
    final_name = re.sub(re_cve, '', final_name)
    final_name = final_name.strip()
    final_name = re.sub(r' +', ' ', final_name)
    return final_name


def get_normal_name(text):
    normal_name = cleanSuburbName(text)
    raw_non_spaces = len(re.sub(r'[^\w]', '', normal_name))
    if bool(re.search(
            r'(COLONIA O PUEBLO|ORIGINARIO|UNIDAD RESPON)', normal_name)):
        return False
    elif bool(re.search(r'(REFIERE|REMANENTE|TOTAL'
                        r'|AUTORI|ELABORO|LABORADO|DIRECTOR)', normal_name)):
        return False
    elif raw_non_spaces < 4:
        return None
    return normal_name


def clean_text(text):
    final_text = text
    final_text = final_text.strip()
    final_text = re.sub(r' +', ' ', final_text)
    final_text = re.sub(r'(\(|\¿|\¡)\s?', '\\1', final_text)
    final_text = re.sub(r'\s?(\)|\:|\,|\.|\;|\?|\!)', '\\1', final_text)
    final_text = re.sub(r'(\)|\:|\,|\.|\;|\?|\!)(\w)', '\\1 \\2', final_text)
    final_text = final_text.strip()
    return final_text


def calculateNumber(text, column, has_special_format=None):
    errors = []
    is_ammount = column["type"] == "ammount"
    new_value = text
    new_value = re.sub(r'(B)', '8', new_value)
    new_value = re.sub(r'(O)', '0', new_value)
    new_value = re.sub(r'[\(\)]', '', new_value)
    new_value = re.sub(r'(s|S)', '5', new_value)
    new_value = re.sub(r'(/)', ',', new_value)
    has_error_excel = bool(re.search(r'D.V', new_value))
    new_value = re.sub(r'[^0-9\,\.\-\%]', '', new_value)
    new_value = re.sub(r'(\,)', '.', new_value)
    re_ammount = re.compile(r'^\d{1,7}(\.\d{2})?$')
    re_percent = re.compile(r'^\-?\d{1,3}(\.\d{1,2})?[4895%]?\)?$')
    re_compara = re_ammount if is_ammount else re_percent
    has_percent = re.compile(r'[4895%]$')
    has_decimals = re.compile(r'\d{2}$')
    re_format = has_decimals if is_ammount else has_percent
    if is_ammount:
        new_value = re.sub(r'\.(\d{3})', '\\1', new_value)
    correct_format = bool(re_compara.search(new_value))
    if has_special_format is None:
        if not correct_format:
            return None
        return 1 if bool(re_format.search(new_value)) else 0
    if not correct_format:
        if has_error_excel and not is_ammount:
            new_value = '0%' if has_special_format else '0'
        else:
            err = u"Formato incorrecto en columna %s" % column["title"]
            errors.append(err)
    only_ints = new_value
    if (has_special_format and not is_ammount):
        only_ints = re.sub(re_format, '', only_ints)
    try:
        float_value = float(only_ints)
    except Exception as e:
        only_ints = re.sub(re_format, '', only_ints)
        try:
            float_value = float(only_ints)
        except Exception as e:
            errors.append(
                u"No se pudo converir número en columna %s"
                % column["title"])
            if only_ints:
                try:
                    print ("error al convertir en calculateNumber: \"%s\""
                           % text)
                except Exception as e:
                    pass
                print e
            return None, errors
    if (is_ammount and 0 < float_value < 1000) or float_value > 10000000:
        errors.append(u"Número inválido en columna %s" % column["title"])
    elif not is_ammount and float_value > 2:
        float_value = float_value / float(100)
    return float_value, errors
```

**is_comment_constant_removed** false · **is_sharp_comment_removed** true
---

**Row 9** · **hexsha** `f701516e4b0f9d00ae62ff939cc7915f776b8f29` · **size** 158 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | modelsClasswork/NewbalanceApp/views.py | cs-fullstack-2019-spring/django-models-cw-EnrickaM | 23b080ac8d1e5eea2fb8cf5dbd394f81b8f468ed | ["Apache-2.0"] | null | null | null |
| max_issues | modelsClasswork/NewbalanceApp/views.py | cs-fullstack-2019-spring/django-models-cw-EnrickaM | 23b080ac8d1e5eea2fb8cf5dbd394f81b8f468ed | ["Apache-2.0"] | null | null | null |
| max_forks | modelsClasswork/NewbalanceApp/views.py | cs-fullstack-2019-spring/django-models-cw-EnrickaM | 23b080ac8d1e5eea2fb8cf5dbd394f81b8f468ed | ["Apache-2.0"] | null | null | null |

**content** (avg_line_length 17.555556 · max_line_length 36 · alphanum_fraction 0.772152):

```python
from django.shortcuts import render

# Create your views here.
from django.http import HttpResponse


def index(request):
    return HttpResponse('TEST URL')
```

**content_no_comment:**

```python
from django.shortcuts import render

from django.http import HttpResponse


def index(request):
    return HttpResponse('TEST URL')
```

**is_comment_constant_removed** true · **is_sharp_comment_removed** true
---

**Row 10** · **hexsha** `f70152ced9356e1baea309fb033c05892d0f39af` · **size** 2,813 · **ext** py · **lang** Python

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/test_correlations.py | energyinpython/pre-pyrepo | 92e44594e12d1110247f011e51734e5ce1fe0b8e | ["MIT"] | null | null | null |
| max_issues | tests/test_correlations.py | energyinpython/pre-pyrepo | 92e44594e12d1110247f011e51734e5ce1fe0b8e | ["MIT"] | null | null | null |
| max_forks | tests/test_correlations.py | energyinpython/pre-pyrepo | 92e44594e12d1110247f011e51734e5ce1fe0b8e | ["MIT"] | null | null | null |

**content** (avg_line_length 34.304878 · max_line_length 114 · alphanum_fraction 0.666193):

```python
from pyrepo import correlations as corrs
from scipy.stats import pearsonr
import unittest
import numpy as np


# Test for Spearman rank correlation coefficient
class Test_Spearman(unittest.TestCase):

    def test_spearman(self):
        """Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity
        in decision-making problems. In International Conference on Computational Science
        (pp. 632-645). Springer, Cham."""
        R = np.array([1, 2, 3, 4, 5])
        Q = np.array([1, 3, 2, 4, 5])
        test_result = corrs.spearman(R, Q)
        real_result = 0.9
        self.assertEqual(test_result, real_result)


# Test for Weighted Spearman rank correlation coefficient
class Test_Weighted_Spearman(unittest.TestCase):

    def test_weighted_spearman(self):
        """Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity
        in decision-making problems. In International Conference on Computational Science
        (pp. 632-645). Springer, Cham."""
        R = np.array([1, 2, 3, 4, 5])
        Q = np.array([1, 3, 2, 4, 5])
        test_result = corrs.weighted_spearman(R, Q)
        real_result = 0.8833
        self.assertEqual(np.round(test_result, 4), real_result)


# Test for Similarity rank coefficient WS
class Test_WS(unittest.TestCase):

    def test_ws(self):
        """Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity
        in decision-making problems. In International Conference on Computational Science
        (pp. 632-645). Springer, Cham."""
        R = np.array([1, 2, 3, 4, 5])
        Q = np.array([1, 3, 2, 4, 5])
        test_result = corrs.WS_coeff(R, Q)
        real_result = 0.8542
        self.assertEqual(np.round(test_result, 4), real_result)


# Test for Pearson correlation coefficient
class Test_Pearson(unittest.TestCase):

    def test_pearson(self):
        """Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity
        in decision-making problems. In International Conference on Computational Science
        (pp. 632-645). Springer, Cham."""
        R = np.array([1, 2, 3, 4, 5])
        Q = np.array([1, 3, 2, 4, 5])
        test_result = corrs.pearson_coeff(R, Q)
        real_result, _ = pearsonr(R, Q)
        self.assertEqual(test_result, real_result)


def main():
    test_spearman_coeff = Test_Spearman()
    test_spearman_coeff.test_spearman()

    test_weighted_spearman_coeff = Test_Weighted_Spearman()
    test_weighted_spearman_coeff.test_weighted_spearman()

    test_pearson_coeff = Test_Pearson()
    test_pearson_coeff.test_pearson()

    test_ws = Test_WS()
    test_ws.test_ws()


if __name__ == '__main__':
    main()
```

**content_no_comment:**

```python
from pyrepo import correlations as corrs
from scipy.stats import pearsonr
import unittest
import numpy as np


class Test_Spearman(unittest.TestCase):

    def test_spearman(self):
        R = np.array([1, 2, 3, 4, 5])
        Q = np.array([1, 3, 2, 4, 5])
        test_result = corrs.spearman(R, Q)
        real_result = 0.9
        self.assertEqual(test_result, real_result)


class Test_Weighted_Spearman(unittest.TestCase):

    def test_weighted_spearman(self):
        R = np.array([1, 2, 3, 4, 5])
        Q = np.array([1, 3, 2, 4, 5])
        test_result = corrs.weighted_spearman(R, Q)
        real_result = 0.8833
        self.assertEqual(np.round(test_result, 4), real_result)


class Test_WS(unittest.TestCase):

    def test_ws(self):
        R = np.array([1, 2, 3, 4, 5])
        Q = np.array([1, 3, 2, 4, 5])
        test_result = corrs.WS_coeff(R, Q)
        real_result = 0.8542
        self.assertEqual(np.round(test_result, 4), real_result)


class Test_Pearson(unittest.TestCase):

    def test_pearson(self):
        R = np.array([1, 2, 3, 4, 5])
        Q = np.array([1, 3, 2, 4, 5])
        test_result = corrs.pearson_coeff(R, Q)
        real_result, _ = pearsonr(R, Q)
        self.assertEqual(test_result, real_result)


def main():
    test_spearman_coeff = Test_Spearman()
    test_spearman_coeff.test_spearman()

    test_weighted_spearman_coeff = Test_Weighted_Spearman()
    test_weighted_spearman_coeff.test_weighted_spearman()

    test_pearson_coeff = Test_Pearson()
    test_pearson_coeff.test_pearson()

    test_ws = Test_WS()
    test_ws.test_ws()


if __name__ == '__main__':
    main()
```

**is_comment_constant_removed** true · **is_sharp_comment_removed** true
f70153728cb260c3c86bc652b2c6fedfd73c3c53
| 4,548
|
py
|
Python
|
core/assembly_system.py
|
YifanQie/Deep_Learning_for_Manufacturing
|
9ba19e41f69c561b04b8573ab9c52c0969f45bfd
|
[
"MIT"
] | 27
|
2019-10-31T15:16:13.000Z
|
2022-03-29T03:56:57.000Z
|
core/assembly_system.py
|
YifanQie/Deep_Learning_for_Manufacturing
|
9ba19e41f69c561b04b8573ab9c52c0969f45bfd
|
[
"MIT"
] | 4
|
2020-03-25T14:18:04.000Z
|
2022-02-10T00:34:58.000Z
|
core/assembly_system.py
|
YifanQie/Deep_Learning_for_Manufacturing
|
9ba19e41f69c561b04b8573ab9c52c0969f45bfd
|
[
"MIT"
] | 7
|
2020-02-23T22:12:37.000Z
|
2021-12-08T20:14:41.000Z
|
import numpy as np
import pandas as pd
from sqlalchemy import create_engine  # needed by get_nominal_cop_database below
""" Contains core classes and methods for initializing a Assembly System, the inputs are provided in assemblyconfig file in utilities"""
class AssemblySystem:
"""Assembly System Class
	:param assembly_type: Type of assembly Single-Station/Multi-Station
	:type assembly_type: str (required)
:param assembly_kccs: Number of KCCs for the assembly
:type assembly_kccs: int (required)
	:param assembly_kpis: Number of KPIs for the assembly
:type assembly_kpis: int (required)
"""
def __init__(self,assembly_type,assembly_kccs,assembly_kpis):
self.assembly_type=assembly_type
self.assembly_kccs=assembly_kccs
self.assembly_kpis=assembly_kpis
class PartType(AssemblySystem):
"""Part System Class, inherits the Assembly System Class, additional parameters for this class include
	:param voxel_dim: Dimension of the voxel
	:type voxel_dim: int (required)
	:param voxel_channels: Number of voxel channels, single channel output - 1 or multi channel - 2,3 (use 1 for deviations in one direction, 2 or 3 if data for multiple deviation directions are present)
	:type voxel_channels: int (required)
	:param point_dim: Number of points in the cloud-of-point
	:type point_dim: int (required)
The class contains two functions - get_nominal_cop and get_nominal_cop_database
"""
def __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim):
super().__init__(assembly_type,assembly_kccs,assembly_kpis)
self.part_name=part_name
self.part_type=part_type
self.voxel_dim=voxel_dim
self.voxel_channels=voxel_channels
self.point_dim=point_dim
def get_nominal_cop(self,file_name):
"""Import nominal cloud-of-point of the assembly from a text/csv file
:param file_name: Name of the input file
:type file_name: str (required)
:returns: numpy array of nominal COP
:rtype: numpy.array [point_dim,3]
"""
df=pd.read_csv(file_name, sep=',',header=None)
nominal_cop=df.values
return nominal_cop
def get_nominal_cop_database(self,conn_str,table_name):
"""Import nominal cloud-of-point of the assembly from a SQL database assumes the table only contains three columns of the nominal COPs in order of the Node IDs
:param conn_str: Connection String for Database
:type conn_str: str (required)
:param table_name: Name of table in the database
:type table_name: str (required)
:returns: numpy array of dim points * 3
:rtype: numpy.array [point_dim,3]
"""
engine = create_engine(conn_str)
squery ='select * from '+table_name
df_nom = pd.read_sql_query(squery,con=engine)
df_nom = df_nom.values
return df_nom
class VRMSimulationModel(PartType):
"""VRM Simulation Model class inherits the part type class, additional parameters of this class include
:param noise_level: The level of artificial noise to be added to simulated data, typically set to 0.1 mm from the measurement system class depending on the scanner
:type noise_level: float (required)
	:param noise_type: The type of noise to be added, can be Gaussian or uniform; for Gaussian, noise_level is used as the standard deviation with zero mean, while for uniform the min and max are set to -noise_level and +noise_level respectively
	:type noise_type: str (optional)
:param convergency_flag: Flag to denote if the simulation model had converged while simulating, is set to 1 by default
:type convergency_flag: int (optional)
The class contains one function kpi_calculator that needs to be defined by the user depending on the assembly output
"""
def __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,noise_level,noise_type='uniform',convergency_flag=1):
super().__init__(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)
self.noise_level=noise_level
self.noise_type=noise_type
self.convergency_flag=convergency_flag
def kpi_calculator(self,cop_data,kpi_params=[]):
""" User defined function to calculate KPI from Cloud of Point Data [KPI]=f(Cop)
:param cop_data: CoP data for a given sample
:type cop_data: np_array [point_dim,3] (required)
:param kpi_params: Various parameters required to calculate the KPI, can be blank if no parameters are required to calculate KPI from CoP
:type kpi_params: list (optional)
:returns: list of multivariate KPIs for the given CoP
:rtype: list
"""
kpi=[None]*self.assembly_kpis
#define function here
return kpi
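# Illustrative sketch (added; not part of the original file): one plausible
# way to supply the user-defined mapping [KPI] = f(CoP) described above is to
# subclass VRMSimulationModel. The class name and the max-deviation KPI are
# hypothetical choices.
class ExampleVRMModel(VRMSimulationModel):
	def kpi_calculator(self,cop_data,kpi_params=[]):
		kpi=[None]*self.assembly_kpis
		# KPI 0: maximum absolute deviation over all points and directions
		kpi[0]=float(np.max(np.abs(cop_data)))
		return kpi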
| 39.547826
| 230
| 0.776165
|
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
class AssemblySystem:
def __init__(self,assembly_type,assembly_kccs,assembly_kpis):
self.assembly_type=assembly_type
self.assembly_kccs=assembly_kccs
self.assembly_kpis=assembly_kpis
class PartType(AssemblySystem):
def __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim):
super().__init__(assembly_type,assembly_kccs,assembly_kpis)
self.part_name=part_name
self.part_type=part_type
self.voxel_dim=voxel_dim
self.voxel_channels=voxel_channels
self.point_dim=point_dim
def get_nominal_cop(self,file_name):
df=pd.read_csv(file_name, sep=',',header=None)
nominal_cop=df.values
return nominal_cop
def get_nominal_cop_database(self,conn_str,table_name):
engine = create_engine(conn_str)
squery ='select * from '+table_name
df_nom = pd.read_sql_query(squery,con=engine)
df_nom = df_nom.values
return df_nom
class VRMSimulationModel(PartType):
def __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,noise_level,noise_type='uniform',convergency_flag=1):
super().__init__(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)
self.noise_level=noise_level
self.noise_type=noise_type
self.convergency_flag=convergency_flag
def kpi_calculator(self,cop_data,kpi_params=[]):
kpi=[None]*self.assembly_kpis
return kpi
| true
| true
|
f701539473089962a5184dad3b593c3ad907b062
| 870
|
py
|
Python
|
tests/namespace_test/NamespaceA/SecondTableInA.py
|
shivvis/flatbuffers
|
791c83aa7e3bfee37ff592cb80910caead5f625c
|
[
"Apache-2.0"
] | 24
|
2016-06-06T09:17:29.000Z
|
2021-01-31T11:14:18.000Z
|
tests/namespace_test/NamespaceA/SecondTableInA.py
|
shivvis/flatbuffers
|
791c83aa7e3bfee37ff592cb80910caead5f625c
|
[
"Apache-2.0"
] | 1
|
2016-09-25T11:20:30.000Z
|
2016-09-25T11:20:30.000Z
|
tests/namespace_test/NamespaceA/SecondTableInA.py
|
shivvis/flatbuffers
|
791c83aa7e3bfee37ff592cb80910caead5f625c
|
[
"Apache-2.0"
] | 11
|
2016-08-18T17:57:28.000Z
|
2019-09-10T07:30:19.000Z
|
# automatically generated, do not modify
# namespace: NamespaceA
import flatbuffers
class SecondTableInA(object):
__slots__ = ['_tab']
# SecondTableInA
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SecondTableInA
def ReferToC(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .TableInC import TableInC
obj = TableInC()
obj.Init(self._tab.Bytes, x)
return obj
return None
def SecondTableInAStart(builder): builder.StartObject(1)
def SecondTableInAAddReferToC(builder, referToC): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(referToC), 0)
def SecondTableInAEnd(builder): return builder.EndObject()
| 31.071429
| 149
| 0.687356
|
import flatbuffers
class SecondTableInA(object):
__slots__ = ['_tab']
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def ReferToC(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .TableInC import TableInC
obj = TableInC()
obj.Init(self._tab.Bytes, x)
return obj
return None
def SecondTableInAStart(builder): builder.StartObject(1)
def SecondTableInAAddReferToC(builder, referToC): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(referToC), 0)
def SecondTableInAEnd(builder): return builder.EndObject()
| true
| true
|
f7015476aad1aa9cc35eb49f95af082697d8246d
| 1,084
|
py
|
Python
|
dialogos/build/w3lib/setup.py
|
bertucho/epic-movie-quotes-quiz
|
09e4ec58a441ab74c1ce6e0fde4e71b08a4d7250
|
[
"MIT"
] | null | null | null |
dialogos/build/w3lib/setup.py
|
bertucho/epic-movie-quotes-quiz
|
09e4ec58a441ab74c1ce6e0fde4e71b08a4d7250
|
[
"MIT"
] | null | null | null |
dialogos/build/w3lib/setup.py
|
bertucho/epic-movie-quotes-quiz
|
09e4ec58a441ab74c1ce6e0fde4e71b08a4d7250
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='w3lib',
version='1.12.0',
license='BSD',
description='Library of web-related functions',
author='Scrapy project',
author_email='[email protected]',
url='https://github.com/scrapy/w3lib',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
      zip_safe=False,
platforms=['Any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
],
install_requires=['six >= 1.4.1'],
)
| 33.875
| 70
| 0.609779
|
from setuptools import setup, find_packages
setup(
name='w3lib',
version='1.12.0',
license='BSD',
description='Library of web-related functions',
author='Scrapy project',
author_email='[email protected]',
url='https://github.com/scrapy/w3lib',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
      zip_safe=False,
platforms=['Any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
],
install_requires=['six >= 1.4.1'],
)
| true
| true
|
f70154ae8605b52640bf200ccaaa8bf6ad001bf5
| 1,106
|
py
|
Python
|
Module3/assignment2.py
|
dipdeb/DAT210x
|
9103844fa7f76052bdcc5a4ec60e8afbc91a9f6b
|
[
"MIT"
] | null | null | null |
Module3/assignment2.py
|
dipdeb/DAT210x
|
9103844fa7f76052bdcc5a4ec60e8afbc91a9f6b
|
[
"MIT"
] | null | null | null |
Module3/assignment2.py
|
dipdeb/DAT210x
|
9103844fa7f76052bdcc5a4ec60e8afbc91a9f6b
|
[
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
# Look pretty...
# matplotlib.style.use('ggplot')
plt.style.use('ggplot')
#
# TODO: Load up the Seeds Dataset into a Dataframe
# It's located at 'Datasets/wheat.data'
#
wheat_df = pd.read_csv('/home/dipanjan/DAT210x/Module3/Datasets/wheat.data', index_col=0)
#
# TODO: Create a 2d scatter plot that graphs the
# area and perimeter features
#
# .. your code here ..
wheat_df.plot.scatter(x='area', y='perimeter')
#
# TODO: Create a 2d scatter plot that graphs the
# groove and asymmetry features
#
# .. your code here ..
wheat_df.plot.scatter(x='groove', y='asymmetry')
#
# TODO: Create a 2d scatter plot that graphs the
# compactness and width features
#
# .. your code here ..
wheat_df.plot.scatter(x='compactness', y='width')
# BONUS TODO:
# After completing the above, go ahead and run your program
# Check out the results, and see what happens when you add
# in the optional display parameter marker with values of
# either '^', '.', or 'o'.
wheat_df.plot.scatter(x='compactness', y='width', marker='o')
plt.show()
| 22.571429
| 90
| 0.712477
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
plt.style.use('ggplot')
#
wheat_df = pd.read_csv('/home/dipanjan/DAT210x/Module3/Datasets/wheat.data', index_col=0)
#
# TODO: Create a 2d scatter plot that graphs the
# area and perimeter features
#
# .. your code here ..
wheat_df.plot.scatter(x='area', y='perimeter')
#
# TODO: Create a 2d scatter plot that graphs the
# groove and asymmetry features
#
# .. your code here ..
wheat_df.plot.scatter(x='groove', y='asymmetry')
#
# TODO: Create a 2d scatter plot that graphs the
# compactness and width features
#
# .. your code here ..
wheat_df.plot.scatter(x='compactness', y='width')
# BONUS TODO:
# After completing the above, go ahead and run your program
# Check out the results, and see what happens when you add
# in the optional display parameter marker with values of
# either '^', '.', or 'o'.
wheat_df.plot.scatter(x='compactness', y='width', marker='o')
plt.show()
| true
| true
|
f70155c72d7ad130cb57d825accdfa6901578200
| 4,547
|
py
|
Python
|
scf/scf_utils.py
|
hongzhouye/sigma-SCF
|
62e2dce538d1e68c4dc3c72fdf27beb1911e544f
|
[
"BSD-3-Clause"
] | 4
|
2016-07-30T22:02:50.000Z
|
2018-08-02T23:46:15.000Z
|
scf/scf_utils.py
|
hongzhouye/sigma-SCF
|
62e2dce538d1e68c4dc3c72fdf27beb1911e544f
|
[
"BSD-3-Clause"
] | 11
|
2017-08-04T20:34:04.000Z
|
2017-08-08T23:07:42.000Z
|
scf/scf_utils.py
|
hongzhouye/sigma-SCF
|
62e2dce538d1e68c4dc3c72fdf27beb1911e544f
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import os, sys
sys.path.append(os.path.dirname(__file__))
from diis_solver import diis_solver, diis_solver_uhf
sys.path.pop()
import jk
import xform
def homo_lumo_mix(C, nocc, beta):
"""
Mix a portion of LUMO to HOMO.
Used when generating spin-unrestricted guess.
"""
if beta < 0. or beta > 1.:
raise Exception("Mixing beta must be in [0, 1]")
Cb = C.copy()
homo = C[:, nocc - 1]
lumo = C[:, nocc]
Cb[:, nocc - 1] = (1. - beta) ** 0.5 * homo + beta ** 0.5 * lumo
return Cb
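# Note: the weights (1. - beta) ** 0.5 and beta ** 0.5 keep the mixed orbital
# normalized, since (1 - beta) + beta = 1 (assuming homo and lumo are
# orthonormal columns of C).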
def get_dm(C, nel):
D = C[:, :nel]
D = D @ D.T
return D
def get_JK(is_fitted, g, D):
if(is_fitted):
# FINISH LATER
X = np.einsum("Pls,ls->P", g, D)
J = np.einsum("mnP,P->mn", np.swapaxes(g, 0, 2), X)
Z = np.einsum("Pns,ls->Pnl", g, D)
K = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Z)
return (J, K)
else:
#J = np.einsum("pqrs,rs->pq", g, D)
#K = np.einsum("prqs,rs->pq", g, D)
J, K = jk.getJK_np_Dshift(g, D - np.diag(np.diag(D) * 0.5))
return (J, K)
def get_JK_uhf(is_fitted, g, Ds):
"""
Ds = [Da, Db]
"""
Da, Db = Ds[0], Ds[1]
Dtot = Da + Db
if (is_fitted == True):
X = np.einsum("Pls,ls->P", g, Dtot)
Jtot = np.einsum("mnP,P->mn", np.swapaxes(g, 0, 2), X)
Za = np.einsum("Pns,ls->Pnl", g, Da)
Ka = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Za)
Zb = np.einsum("Pns,ls->Pnl", g, Db)
Kb = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Zb)
return Jtot, Ka, Kb
else:
Jtot = np.einsum("pqrs, rs -> pq", g, Dtot)
Ka = np.einsum("prqs, rs -> pq", g, Da)
Kb = np.einsum("prqs, rs -> pq", g, Db)
return Jtot, Ka, Kb
def get_fock(H, g, D):
J, K = get_JK(len(g.shape) == 3, g, D)
return H + 2 * J - K
def diis_update(F_prev_list, r_prev_list):
c = diis_solver(r_prev_list) # GET THE COEFFICIENTS!!
out = 0 * F_prev_list[0]
for i, element in enumerate(F_prev_list):
out += c[i] * element
return out
def oda_update(dF, dD, dE):
"""
ODA update:
        lbd = 0.5 * (1 - dE / E_deriv)
"""
E_deriv = np.sum(dF * dD)
lbd = 0.5 * (1. - dE / E_deriv)
if lbd < 0 or lbd > 1:
lbd = 0.9999 if dE < 0 else 1.e-4
return lbd
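# Assumed usage of the ODA damping factor returned above (a sketch, not from
# this file): linearly interpolate between consecutive SCF iterates, e.g.
#   F = F_old + lbd * (F_new - F_old)
#   D = D_old + lbd * (D_new - D_old)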
def get_fock_uhf(H, g, Ds):
"""
    Build the UHF Fock matrices from the core Hamiltonian H, the ERI tensor
    g and the alpha/beta density matrices Ds = [Da, Db].
"""
Jtot, Ka, Kb = get_JK_uhf(len(g.shape) == 3, g, Ds)
return H + Jtot - Ka, H + Jtot - Kb
def diis_update_uhf(F_prev_lists, r_prev_lists):
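    """
    DIIS update from the previous alpha/beta Fock matrices and error
    vectors (the UHF counterpart of diis_update above).
    """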
c = diis_solver_uhf(r_prev_lists[0], r_prev_lists[1])
Fa = 0 * F_prev_lists[0][0]
for i, element in enumerate(F_prev_lists[0]):
Fa += c[i] * element
Fb = 0 * F_prev_lists[0][0]
for i, element in enumerate(F_prev_lists[1]):
Fb += c[i] * element
return Fa, Fb
def oda_update_uhf(dFs, dDs, dE):
"""
ODA update:
        lbd = 0.5 * (1 - dE / E_deriv)
"""
if type(dFs) is not list:
raise Exception("arg1 and arg2 are list of alpha/beta matrices.")
E_deriv = np.sum(dFs[0] * dDs[0] + dFs[1] * dDs[1])
lbd = 0.5 * (1. - dE / E_deriv)
if lbd < 0 or lbd > 1:
lbd = 0.9999 if dE < 0 else 1.e-4
return lbd
def diag(F, A):
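    # Solves FC = SCe by transforming into an orthonormal basis via A
    # (assumed to satisfy A.T @ S @ A = I, e.g. A = S**-0.5), diagonalizing,
    # and back-transforming the eigenvectors.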
Fp = A.T @ F @ A
eps, Cp = np.linalg.eigh(Fp)
C = A @ Cp
return eps, C
def get_SCF_err(S, D, F):
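    # DIIS error vector: the commutator S @ D @ F - F @ D @ S, which vanishes
    # at SCF convergence; err is its root-mean-square magnitude.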
err_v = S @ D @ F - F @ D @ S
err = np.mean(err_v ** 2) ** 0.5
return err, err_v
def get_SCF_energy(H, F, D, unrestricted):
"""
Calculates the energy.
"""
if unrestricted == True:
if type(F) is not list or type(D) is not list:
raise Exception("For UHF, F and D must have type list.")
Fa, Fb = F[0], F[1]
Da, Db = D[0], D[1]
Dtot = Da + Db
return np.sum(Dtot * H + Da * Fa + Db * Fb) * 0.5
else:
return np.sum((H + F) * D)
def xform_2(H, A):
"""
Basis xform for 2-tensor
"""
if len(H.shape) != 2:
raise Exception("Dimension error: arg1 should be a matrix")
return A.T @ H @ A
def xform_4(g, A):
"""
Basis xform for 4-tensor
"""
if len(g.shape) != 4:
raise Exception("""
Dimension error: arg1 should be a four-tensor.
Note that you should set is_fitted to be False.
""")
#return np.einsum("pi, qj, pqrs, rk, sl -> ijkl", A, A, g, A, A, optimize=True)
return xform.xform_4_np(g, A)
| 25.982857
| 83
| 0.533978
|
import numpy as np
import os, sys
sys.path.append(os.path.dirname(__file__))
from diis_solver import diis_solver, diis_solver_uhf
sys.path.pop()
import jk
import xform
def homo_lumo_mix(C, nocc, beta):
if beta < 0. or beta > 1.:
raise Exception("Mixing beta must be in [0, 1]")
Cb = C.copy()
homo = C[:, nocc - 1]
lumo = C[:, nocc]
Cb[:, nocc - 1] = (1. - beta) ** 0.5 * homo + beta ** 0.5 * lumo
return Cb
def get_dm(C, nel):
D = C[:, :nel]
D = D @ D.T
return D
def get_JK(is_fitted, g, D):
if(is_fitted):
X = np.einsum("Pls,ls->P", g, D)
J = np.einsum("mnP,P->mn", np.swapaxes(g, 0, 2), X)
Z = np.einsum("Pns,ls->Pnl", g, D)
K = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Z)
return (J, K)
else:
J, K = jk.getJK_np_Dshift(g, D - np.diag(np.diag(D) * 0.5))
return (J, K)
def get_JK_uhf(is_fitted, g, Ds):
Da, Db = Ds[0], Ds[1]
Dtot = Da + Db
if (is_fitted == True):
X = np.einsum("Pls,ls->P", g, Dtot)
Jtot = np.einsum("mnP,P->mn", np.swapaxes(g, 0, 2), X)
Za = np.einsum("Pns,ls->Pnl", g, Da)
Ka = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Za)
Zb = np.einsum("Pns,ls->Pnl", g, Db)
Kb = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Zb)
return Jtot, Ka, Kb
else:
Jtot = np.einsum("pqrs, rs -> pq", g, Dtot)
Ka = np.einsum("prqs, rs -> pq", g, Da)
Kb = np.einsum("prqs, rs -> pq", g, Db)
return Jtot, Ka, Kb
def get_fock(H, g, D):
J, K = get_JK(len(g.shape) == 3, g, D)
return H + 2 * J - K
def diis_update(F_prev_list, r_prev_list):
    c = diis_solver(r_prev_list)
    out = 0 * F_prev_list[0]
for i, element in enumerate(F_prev_list):
out += c[i] * element
return out
def oda_update(dF, dD, dE):
E_deriv = np.sum(dF * dD)
lbd = 0.5 * (1. - dE / E_deriv)
if lbd < 0 or lbd > 1:
lbd = 0.9999 if dE < 0 else 1.e-4
return lbd
def get_fock_uhf(H, g, Ds):
Jtot, Ka, Kb = get_JK_uhf(len(g.shape) == 3, g, Ds)
return H + Jtot - Ka, H + Jtot - Kb
def diis_update_uhf(F_prev_lists, r_prev_lists):
c = diis_solver_uhf(r_prev_lists[0], r_prev_lists[1])
Fa = 0 * F_prev_lists[0][0]
for i, element in enumerate(F_prev_lists[0]):
Fa += c[i] * element
Fb = 0 * F_prev_lists[0][0]
for i, element in enumerate(F_prev_lists[1]):
Fb += c[i] * element
return Fa, Fb
def oda_update_uhf(dFs, dDs, dE):
if type(dFs) is not list:
raise Exception("arg1 and arg2 are list of alpha/beta matrices.")
E_deriv = np.sum(dFs[0] * dDs[0] + dFs[1] * dDs[1])
lbd = 0.5 * (1. - dE / E_deriv)
if lbd < 0 or lbd > 1:
lbd = 0.9999 if dE < 0 else 1.e-4
return lbd
def diag(F, A):
Fp = A.T @ F @ A
eps, Cp = np.linalg.eigh(Fp)
C = A @ Cp
return eps, C
def get_SCF_err(S, D, F):
err_v = S @ D @ F - F @ D @ S
err = np.mean(err_v ** 2) ** 0.5
return err, err_v
def get_SCF_energy(H, F, D, unrestricted):
if unrestricted == True:
if type(F) is not list or type(D) is not list:
raise Exception("For UHF, F and D must have type list.")
Fa, Fb = F[0], F[1]
Da, Db = D[0], D[1]
Dtot = Da + Db
return np.sum(Dtot * H + Da * Fa + Db * Fb) * 0.5
else:
return np.sum((H + F) * D)
def xform_2(H, A):
if len(H.shape) != 2:
raise Exception("Dimension error: arg1 should be a matrix")
return A.T @ H @ A
def xform_4(g, A):
if len(g.shape) != 4:
raise Exception("""
Dimension error: arg1 should be a four-tensor.
Note that you should set is_fitted to be False.
""")
return xform.xform_4_np(g, A)
| true
| true
|
f701560ac0a3a86c789a437ba0a2eed06d2ca194
| 1,250
|
py
|
Python
|
tests/schema/test_schema.py
|
simonsobs/acondbs
|
6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6
|
[
"MIT"
] | null | null | null |
tests/schema/test_schema.py
|
simonsobs/acondbs
|
6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6
|
[
"MIT"
] | 24
|
2020-04-02T19:29:07.000Z
|
2022-03-08T03:05:43.000Z
|
tests/schema/test_schema.py
|
simonsobs/acondbs
|
6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6
|
[
"MIT"
] | 1
|
2020-04-08T15:48:28.000Z
|
2020-04-08T15:48:28.000Z
|
import pytest
from .funcs import assert_query
QUERY = '''
{
__schema {
types {
kind
name
fields {
name
}
}
queryType {
fields {
name
}
}
mutationType {
fields {
name
}
}
subscriptionType {
fields {
name
}
}
}
}
'''
##__________________________________________________________________||
params = [
pytest.param(
{"query": QUERY},
{"Authorization": "Bearer 90b2ee5fed25506df04fd37343bb68d1803dd97f"},
id="admin",
),
pytest.param(
{"query": QUERY},
{"Authorization": "Bearer 0fb8c9e16d6f7c4961c4c49212bf197d79f14080"},
id="private",
),
pytest.param(
{"query": QUERY},
{"Authorization": "Bearer 1a2d18f270df3abacfb85c5413b668f97794b4ce"},
id="public-wrong-token",
),
pytest.param(
{"query": QUERY},
{},
id="public-no-token",
),
]
@pytest.mark.parametrize("data, headers", params)
@pytest.mark.asyncio
async def test_schema(app_users, snapshot, data, headers):
await assert_query(app_users, snapshot, data, headers)
##__________________________________________________________________||
| 18.656716
| 77
| 0.5896
|
import pytest
from .funcs import assert_query
QUERY = '''
{
__schema {
types {
kind
name
fields {
name
}
}
queryType {
fields {
name
}
}
mutationType {
fields {
name
}
}
subscriptionType {
fields {
name
}
}
}
}
'''
params = [
pytest.param(
{"query": QUERY},
{"Authorization": "Bearer 90b2ee5fed25506df04fd37343bb68d1803dd97f"},
id="admin",
),
pytest.param(
{"query": QUERY},
{"Authorization": "Bearer 0fb8c9e16d6f7c4961c4c49212bf197d79f14080"},
id="private",
),
pytest.param(
{"query": QUERY},
{"Authorization": "Bearer 1a2d18f270df3abacfb85c5413b668f97794b4ce"},
id="public-wrong-token",
),
pytest.param(
{"query": QUERY},
{},
id="public-no-token",
),
]
@pytest.mark.parametrize("data, headers", params)
@pytest.mark.asyncio
async def test_schema(app_users, snapshot, data, headers):
await assert_query(app_users, snapshot, data, headers)
| true
| true
|
f70156a3a2fcf8fcc48c436b8d608cbe337dddd0
| 2,062
|
py
|
Python
|
setup.py
|
hannes-holey/hans
|
9604eedd70d54f3d4e2058fbc5b911e92e005e4f
|
[
"MIT"
] | 1
|
2022-02-03T09:31:24.000Z
|
2022-02-03T09:31:24.000Z
|
setup.py
|
hannes-holey/hans
|
9604eedd70d54f3d4e2058fbc5b911e92e005e4f
|
[
"MIT"
] | 6
|
2022-02-03T09:24:24.000Z
|
2022-02-07T09:25:16.000Z
|
setup.py
|
hannes-holey/hans
|
9604eedd70d54f3d4e2058fbc5b911e92e005e4f
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright 2021 Hannes Holey
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup, find_packages
with open("requirements.txt", "r") as fh:
requirements = [line.strip() for line in fh]
setup(name='hans',
description='Height-Averaged Navier-Stokes (HANS) solver for 2D lubrication problems',
author='Hannes Holey',
author_email='[email protected]',
url='http://github.com/hannes-holey/hans',
license="MIT",
packages=find_packages(),
package_data={'': ['ChangeLog.md']},
include_package_data=True,
scripts=['cli/plot1D_evolution.py',
'cli/plot1D_last.py',
'cli/plot2D_last.py',
'cli/plot_scalar.py',
'cli/read_config.py',
'cli/animate1D.py',
'cli/animate2D.py'],
test_suite='tests',
tests_require=["pytest>=4"],
install_requires=requirements,
python_requires=">=3.6",
use_scm_version=True,
setup_requires=['setuptools_scm>=3.5.0'],
zip_safe=False)
| 38.185185
| 92
| 0.70805
|
from setuptools import setup, find_packages
with open("requirements.txt", "r") as fh:
requirements = [line.strip() for line in fh]
setup(name='hans',
description='Height-Averaged Navier-Stokes (HANS) solver for 2D lubrication problems',
author='Hannes Holey',
author_email='[email protected]',
url='http://github.com/hannes-holey/hans',
license="MIT",
packages=find_packages(),
package_data={'': ['ChangeLog.md']},
include_package_data=True,
scripts=['cli/plot1D_evolution.py',
'cli/plot1D_last.py',
'cli/plot2D_last.py',
'cli/plot_scalar.py',
'cli/read_config.py',
'cli/animate1D.py',
'cli/animate2D.py'],
test_suite='tests',
tests_require=["pytest>=4"],
install_requires=requirements,
python_requires=">=3.6",
use_scm_version=True,
setup_requires=['setuptools_scm>=3.5.0'],
zip_safe=False)
| true
| true
|
f701581069cfd8be095c0662247dab35763588a4
| 4,161
|
py
|
Python
|
tools/accuracy_checker/accuracy_checker/annotation_converters/cvat_multilabel_recognition.py
|
APrigarina/open_model_zoo
|
b1ff98b64a6222cf6b5f3838dc0271422250de95
|
[
"Apache-2.0"
] | 1,031
|
2020-07-16T08:30:57.000Z
|
2022-03-30T19:42:52.000Z
|
tools/accuracy_checker/accuracy_checker/annotation_converters/cvat_multilabel_recognition.py
|
APrigarina/open_model_zoo
|
b1ff98b64a6222cf6b5f3838dc0271422250de95
|
[
"Apache-2.0"
] | 966
|
2020-07-16T08:13:00.000Z
|
2022-03-31T18:09:18.000Z
|
tools/accuracy_checker/accuracy_checker/annotation_converters/cvat_multilabel_recognition.py
|
APrigarina/open_model_zoo
|
b1ff98b64a6222cf6b5f3838dc0271422250de95
|
[
"Apache-2.0"
] | 440
|
2020-07-16T12:52:50.000Z
|
2022-03-31T14:21:41.000Z
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .format_converter import FileBasedAnnotationConverter, ConverterReturn
from ..representation import MultiLabelRecognitionAnnotation
from ..utils import read_xml, check_file_existence
from ..config import StringField, PathField, ConfigError
class CVATMultilabelAttributesRecognitionConverter(FileBasedAnnotationConverter):
__provider__ = 'cvat_multilabel_binary_attributes_recognition'
annotation_types = (MultiLabelRecognitionAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'label': StringField(description='specific label for attribute collection'),
'images_dir': PathField(
is_directory=True, optional=True,
description='path to dataset images, used only for content existence check'
)
})
return configuration_parameters
def configure(self):
super().configure()
self.label = self.get_value_from_config('label')
self.images_dir = self.get_value_from_config('images_dir') or self.annotation_file.parent
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
annotation = read_xml(self.annotation_file)
meta = annotation.find('meta')
size = int(meta.find('task').find('size').text)
label = self.select_label(meta)
label_to_id = {attribute.find('name').text: idx for idx, attribute in enumerate(label.iter('attribute'))}
num_attributes = len(label_to_id)
annotations = []
content_errors = None if not check_content else []
for image_id, image in enumerate(annotation.iter('image')):
identifier = image.attrib['name'].split('/')[-1]
if check_content:
if not check_file_existence(self.images_dir / identifier):
content_errors.append('{}: does not exist'.format(self.images_dir / identifier))
for bbox in image:
if 'label' not in bbox.attrib.keys() or bbox.attrib['label'] != self.label:
continue
bbox_rect = [
float(bbox.attrib['xtl']), float(bbox.attrib['ytl']),
float(bbox.attrib['xbr']), float(bbox.attrib['ybr'])
]
attributes = -np.ones(num_attributes)
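                # All attributes start at -1 (not annotated); values found in
                # the XML are overwritten below with 1 ('T') or 0 otherwise.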
for attribute in bbox.iter('attribute'):
attribute_name = attribute.attrib['name']
attribute_label = label_to_id[attribute_name]
attributes[attribute_label] = 1 if attribute.text == 'T' else 0
attributes_annotation = MultiLabelRecognitionAnnotation(identifier, attributes)
attributes_annotation.metadata['rect'] = bbox_rect
annotations.append(attributes_annotation)
if progress_callback is not None and image_id % progress_interval == 0:
progress_callback(image_id * 100 / size)
return ConverterReturn(annotations, self.generate_meta(label_to_id), content_errors)
@staticmethod
def generate_meta(attribute_values_mapping):
return {'label_map': {value: key for key, value in attribute_values_mapping.items()}}
def select_label(self, meta):
label = [label for label in meta.iter('label') if label.find('name').text == self.label]
if not label:
            raise ConfigError('{} is not present in the annotation'.format(self.label))
return label[0]
| 46.233333
| 113
| 0.670031
|
import numpy as np
from .format_converter import FileBasedAnnotationConverter, ConverterReturn
from ..representation import MultiLabelRecognitionAnnotation
from ..utils import read_xml, check_file_existence
from ..config import StringField, PathField, ConfigError
class CVATMultilabelAttributesRecognitionConverter(FileBasedAnnotationConverter):
__provider__ = 'cvat_multilabel_binary_attributes_recognition'
annotation_types = (MultiLabelRecognitionAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'label': StringField(description='specific label for attribute collection'),
'images_dir': PathField(
is_directory=True, optional=True,
description='path to dataset images, used only for content existence check'
)
})
return configuration_parameters
def configure(self):
super().configure()
self.label = self.get_value_from_config('label')
self.images_dir = self.get_value_from_config('images_dir') or self.annotation_file.parent
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
annotation = read_xml(self.annotation_file)
meta = annotation.find('meta')
size = int(meta.find('task').find('size').text)
label = self.select_label(meta)
label_to_id = {attribute.find('name').text: idx for idx, attribute in enumerate(label.iter('attribute'))}
num_attributes = len(label_to_id)
annotations = []
content_errors = None if not check_content else []
for image_id, image in enumerate(annotation.iter('image')):
identifier = image.attrib['name'].split('/')[-1]
if check_content:
if not check_file_existence(self.images_dir / identifier):
content_errors.append('{}: does not exist'.format(self.images_dir / identifier))
for bbox in image:
if 'label' not in bbox.attrib.keys() or bbox.attrib['label'] != self.label:
continue
bbox_rect = [
float(bbox.attrib['xtl']), float(bbox.attrib['ytl']),
float(bbox.attrib['xbr']), float(bbox.attrib['ybr'])
]
attributes = -np.ones(num_attributes)
for attribute in bbox.iter('attribute'):
attribute_name = attribute.attrib['name']
attribute_label = label_to_id[attribute_name]
attributes[attribute_label] = 1 if attribute.text == 'T' else 0
attributes_annotation = MultiLabelRecognitionAnnotation(identifier, attributes)
attributes_annotation.metadata['rect'] = bbox_rect
annotations.append(attributes_annotation)
if progress_callback is not None and image_id % progress_interval == 0:
progress_callback(image_id * 100 / size)
return ConverterReturn(annotations, self.generate_meta(label_to_id), content_errors)
@staticmethod
def generate_meta(attribute_values_mapping):
return {'label_map': {value: key for key, value in attribute_values_mapping.items()}}
def select_label(self, meta):
label = [label for label in meta.iter('label') if label.find('name').text == self.label]
if not label:
            raise ConfigError('{} is not present in the annotation'.format(self.label))
return label[0]
| true
| true
|
f701593506fcf4f739f92e926ff4b2ac42373413
| 15,435
|
py
|
Python
|
bin/awscli/customizations/history/show.py
|
iilness2/bash-lambda-layer-custom
|
0b054d4ccb0623460354ba1f58059258c095a494
|
[
"MIT"
] | 399
|
2018-12-09T14:38:30.000Z
|
2022-03-31T19:34:05.000Z
|
bin/awscli/customizations/history/show.py
|
iilness2/bash-lambda-layer-custom
|
0b054d4ccb0623460354ba1f58059258c095a494
|
[
"MIT"
] | 64
|
2018-12-10T01:34:46.000Z
|
2022-01-13T14:04:34.000Z
|
bin/awscli/customizations/history/show.py
|
iilness2/bash-lambda-layer-custom
|
0b054d4ccb0623460354ba1f58059258c095a494
|
[
"MIT"
] | 80
|
2018-12-10T10:36:53.000Z
|
2022-03-22T13:40:32.000Z
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import json
import sys
import xml.parsers.expat
import xml.dom.minidom
import colorama
from awscli.compat import six
from awscli.customizations.history.commands import HistorySubcommand
from awscli.customizations.history.filters import RegexFilter
class Formatter(object):
def __init__(self, output=None, include=None, exclude=None):
"""Formats and outputs CLI history events
:type output: File-like obj
:param output: The stream to write the formatted event to. By default
sys.stdout is used.
:type include: list
:param include: A filter specifying which event to only be displayed.
This parameter is mutually exclusive with exclude.
:type exclude: list
:param exclude: A filter specifying which events to exclude from being
displayed. This parameter is mutually exclusive with include.
"""
self._output = output
if self._output is None:
self._output = sys.stdout
if include and exclude:
raise ValueError(
'Either input or exclude can be provided but not both')
self._include = include
self._exclude = exclude
def display(self, event_record):
"""Displays a formatted version of the event record
:type event_record: dict
:param event_record: The event record to format and display.
"""
if self._should_display(event_record):
self._display(event_record)
def _display(self, event_record):
raise NotImplementedError('_display()')
def _should_display(self, event_record):
if self._include:
return event_record['event_type'] in self._include
elif self._exclude:
return event_record['event_type'] not in self._exclude
else:
return True
class DetailedFormatter(Formatter):
_SIG_FILTER = RegexFilter(
'Signature=([a-z0-9]{4})[a-z0-9]{60}',
r'Signature=\1...',
)
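    # Redacts AWS SigV4 signatures in recorded request headers, keeping only
    # the first four of the 64 hex characters.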
_SECTIONS = {
'CLI_VERSION': {
'title': 'AWS CLI command entered',
'values': [
{'description': 'with AWS CLI version'}
]
},
'CLI_ARGUMENTS': {
'values': [
{'description': 'with arguments'}
]
},
'API_CALL': {
'title': 'API call made',
'values': [
{
'description': 'to service',
'payload_key': 'service'
},
{
'description': 'using operation',
'payload_key': 'operation'
},
{
'description': 'with parameters',
'payload_key': 'params',
'value_format': 'dictionary'
}
]
},
'HTTP_REQUEST': {
'title': 'HTTP request sent',
'values': [
{
'description': 'to URL',
'payload_key': 'url'
},
{
'description': 'with method',
'payload_key': 'method'
},
{
'description': 'with headers',
'payload_key': 'headers',
'value_format': 'dictionary',
'filters': [_SIG_FILTER]
},
{
'description': 'with body',
'payload_key': 'body',
'value_format': 'http_body'
}
]
},
'HTTP_RESPONSE': {
'title': 'HTTP response received',
'values': [
{
'description': 'with status code',
'payload_key': 'status_code'
},
{
'description': 'with headers',
'payload_key': 'headers',
'value_format': 'dictionary'
},
{
'description': 'with body',
'payload_key': 'body',
'value_format': 'http_body'
}
]
},
'PARSED_RESPONSE': {
'title': 'HTTP response parsed',
'values': [
{
'description': 'parsed to',
'value_format': 'dictionary'
}
]
},
'CLI_RC': {
'title': 'AWS CLI command exited',
'values': [
{'description': 'with return code'}
]
},
}
_COMPONENT_COLORS = {
'title': colorama.Style.BRIGHT,
'description': colorama.Fore.CYAN
}
def __init__(self, output=None, include=None, exclude=None, colorize=True):
super(DetailedFormatter, self).__init__(output, include, exclude)
self._request_id_to_api_num = {}
self._num_api_calls = 0
self._colorize = colorize
self._value_pformatter = SectionValuePrettyFormatter()
if self._colorize:
colorama.init(autoreset=True, strip=False)
def _display(self, event_record):
section_definition = self._SECTIONS.get(event_record['event_type'])
if section_definition is not None:
self._display_section(event_record, section_definition)
def _display_section(self, event_record, section_definition):
if 'title' in section_definition:
self._display_title(section_definition['title'], event_record)
for value_definition in section_definition['values']:
self._display_value(value_definition, event_record)
def _display_title(self, title, event_record):
formatted_title = self._format_section_title(title, event_record)
self._write_output(formatted_title)
def _display_value(self, value_definition, event_record):
value_description = value_definition['description']
event_record_payload = event_record['payload']
value = event_record_payload
if 'payload_key' in value_definition:
value = event_record_payload[value_definition['payload_key']]
formatted_value = self._format_description(value_description)
formatted_value += self._format_value(
value, event_record, value_definition.get('value_format')
)
if 'filters' in value_definition:
for text_filter in value_definition['filters']:
formatted_value = text_filter.filter_text(formatted_value)
self._write_output(formatted_value)
def _write_output(self, content):
if isinstance(content, six.text_type):
content = content.encode('utf-8')
self._output.write(content)
def _format_section_title(self, title, event_record):
formatted_title = title
api_num = self._get_api_num(event_record)
if api_num is not None:
formatted_title = ('[%s] ' % api_num) + formatted_title
formatted_title = self._color_if_configured(formatted_title, 'title')
formatted_title += '\n'
formatted_timestamp = self._format_description('at time')
formatted_timestamp += self._format_value(
event_record['timestamp'], event_record, value_format='timestamp')
return '\n' + formatted_title + formatted_timestamp
def _get_api_num(self, event_record):
request_id = event_record['request_id']
if request_id:
if request_id not in self._request_id_to_api_num:
self._request_id_to_api_num[
request_id] = self._num_api_calls
self._num_api_calls += 1
return self._request_id_to_api_num[request_id]
def _format_description(self, value_description):
return self._color_if_configured(
value_description + ': ', 'description')
def _format_value(self, value, event_record, value_format=None):
if value_format:
formatted_value = self._value_pformatter.pformat(
value, value_format, event_record)
else:
formatted_value = str(value)
return formatted_value + '\n'
def _color_if_configured(self, text, component):
if self._colorize:
color = self._COMPONENT_COLORS[component]
return color + text + colorama.Style.RESET_ALL
return text
class SectionValuePrettyFormatter(object):
def pformat(self, value, value_format, event_record):
return getattr(self, '_pformat_' + value_format)(value, event_record)
def _pformat_timestamp(self, event_timestamp, event_record=None):
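        # Event timestamps are stored as milliseconds since the epoch, hence
        # the division by 1000.0 before conversion.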
return datetime.datetime.fromtimestamp(
event_timestamp/1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
def _pformat_dictionary(self, obj, event_record=None):
return json.dumps(obj=obj, sort_keys=True, indent=4)
def _pformat_http_body(self, body, event_record):
if not body:
return 'There is no associated body'
elif event_record['payload'].get('streaming', False):
return 'The body is a stream and will not be displayed'
elif self._is_xml(body):
# TODO: Figure out a way to minimize the number of times we have
# to parse the XML. Currently at worst, it will take three times.
            # One to determine if it is XML, another to strip whitespace, and
# a third to convert to make it pretty. This is an issue as it
# can cause issues when there are large XML payloads such as
# an s3 ListObjects call.
return self._get_pretty_xml(body)
elif self._is_json_structure(body):
return self._get_pretty_json(body)
else:
return body
def _get_pretty_xml(self, body):
# The body is parsed and whitespace is stripped because some services
# like ec2 already return pretty XML and if toprettyxml() was applied
# to it, it will add even more newlines and spaces on top of it.
# So this just removes all whitespace from the start to prevent the
        # chance of adding too many newlines and spaces when toprettyxml()
# is called.
stripped_body = self._strip_whitespace(body)
xml_dom = xml.dom.minidom.parseString(stripped_body)
return xml_dom.toprettyxml(indent=' '*4, newl='\n')
def _get_pretty_json(self, body):
# The json body is loaded so it can be dumped in a format that
# is desired.
obj = json.loads(body)
return self._pformat_dictionary(obj)
def _is_xml(self, body):
try:
xml.dom.minidom.parseString(body)
except xml.parsers.expat.ExpatError:
return False
return True
def _strip_whitespace(self, xml_string):
xml_dom = xml.dom.minidom.parseString(xml_string)
return ''.join(
[line.strip() for line in xml_dom.toxml().splitlines()]
)
def _is_json_structure(self, body):
if body.startswith('{'):
try:
json.loads(body)
return True
except json.decoder.JSONDecodeError:
return False
return False
class ShowCommand(HistorySubcommand):
NAME = 'show'
DESCRIPTION = (
'Shows the various events related to running a specific CLI command. '
        'If this command is run without any positional arguments, it will '
'display the events for the last CLI command ran.'
)
FORMATTERS = {
'detailed': DetailedFormatter
}
ARG_TABLE = [
{'name': 'command_id', 'nargs': '?', 'default': 'latest',
'positional_arg': True,
'help_text': (
'The ID of the CLI command to show. If this positional argument '
             'is omitted, it will show the last CLI command ran.')},
{'name': 'include', 'nargs': '+',
'help_text': (
'Specifies which events to **only** include when showing the '
'CLI command. This argument is mutually exclusive with '
'``--exclude``.')},
{'name': 'exclude', 'nargs': '+',
'help_text': (
'Specifies which events to exclude when showing the '
'CLI command. This argument is mutually exclusive with '
'``--include``.')},
{'name': 'format', 'choices': FORMATTERS.keys(),
'default': 'detailed', 'help_text': (
'Specifies which format to use in showing the events for '
'the specified CLI command. The following formats are '
'supported:\n\n'
'<ul>'
            '<li> detailed - This is the default format. It prints out a '
'detailed overview of the CLI command ran. It displays all '
'of the key events in the command lifecycle where each '
'important event has a title and its important values '
'underneath. The events are ordered by timestamp and events of '
'the same API call are associated together with the '
'[``api_id``] notation where events that share the same '
'``api_id`` belong to the lifecycle of the same API call.'
'</li>'
'</ul>'
)
}
]
def _run_main(self, parsed_args, parsed_globals):
self._connect_to_history_db()
try:
self._validate_args(parsed_args)
with self._get_output_stream() as output_stream:
formatter = self._get_formatter(
parsed_args, parsed_globals, output_stream)
for record in self._get_record_iterator(parsed_args):
formatter.display(record)
finally:
self._close_history_db()
return 0
def _validate_args(self, parsed_args):
if parsed_args.exclude and parsed_args.include:
raise ValueError(
'Either --exclude or --include can be provided but not both')
def _get_formatter(self, parsed_args, parsed_globals, output_stream):
format_type = parsed_args.format
formatter_kwargs = {
'include': parsed_args.include,
'exclude': parsed_args.exclude,
'output': output_stream
}
if format_type == 'detailed':
formatter_kwargs['colorize'] = self._should_use_color(
parsed_globals)
return self.FORMATTERS[format_type](**formatter_kwargs)
def _get_record_iterator(self, parsed_args):
if parsed_args.command_id == 'latest':
return self._db_reader.iter_latest_records()
else:
return self._db_reader.iter_records(parsed_args.command_id)
| 37.554745
| 79
| 0.586718
|
import datetime
import json
import sys
import xml.parsers.expat
import xml.dom.minidom
import colorama
from awscli.compat import six
from awscli.customizations.history.commands import HistorySubcommand
from awscli.customizations.history.filters import RegexFilter
class Formatter(object):
def __init__(self, output=None, include=None, exclude=None):
self._output = output
if self._output is None:
self._output = sys.stdout
if include and exclude:
raise ValueError(
'Either input or exclude can be provided but not both')
self._include = include
self._exclude = exclude
def display(self, event_record):
if self._should_display(event_record):
self._display(event_record)
def _display(self, event_record):
raise NotImplementedError('_display()')
def _should_display(self, event_record):
if self._include:
return event_record['event_type'] in self._include
elif self._exclude:
return event_record['event_type'] not in self._exclude
else:
return True
class DetailedFormatter(Formatter):
_SIG_FILTER = RegexFilter(
'Signature=([a-z0-9]{4})[a-z0-9]{60}',
r'Signature=\1...',
)
_SECTIONS = {
'CLI_VERSION': {
'title': 'AWS CLI command entered',
'values': [
{'description': 'with AWS CLI version'}
]
},
'CLI_ARGUMENTS': {
'values': [
{'description': 'with arguments'}
]
},
'API_CALL': {
'title': 'API call made',
'values': [
{
'description': 'to service',
'payload_key': 'service'
},
{
'description': 'using operation',
'payload_key': 'operation'
},
{
'description': 'with parameters',
'payload_key': 'params',
'value_format': 'dictionary'
}
]
},
'HTTP_REQUEST': {
'title': 'HTTP request sent',
'values': [
{
'description': 'to URL',
'payload_key': 'url'
},
{
'description': 'with method',
'payload_key': 'method'
},
{
'description': 'with headers',
'payload_key': 'headers',
'value_format': 'dictionary',
'filters': [_SIG_FILTER]
},
{
'description': 'with body',
'payload_key': 'body',
'value_format': 'http_body'
}
]
},
'HTTP_RESPONSE': {
'title': 'HTTP response received',
'values': [
{
'description': 'with status code',
'payload_key': 'status_code'
},
{
'description': 'with headers',
'payload_key': 'headers',
'value_format': 'dictionary'
},
{
'description': 'with body',
'payload_key': 'body',
'value_format': 'http_body'
}
]
},
'PARSED_RESPONSE': {
'title': 'HTTP response parsed',
'values': [
{
'description': 'parsed to',
'value_format': 'dictionary'
}
]
},
'CLI_RC': {
'title': 'AWS CLI command exited',
'values': [
{'description': 'with return code'}
]
},
}
_COMPONENT_COLORS = {
'title': colorama.Style.BRIGHT,
'description': colorama.Fore.CYAN
}
def __init__(self, output=None, include=None, exclude=None, colorize=True):
super(DetailedFormatter, self).__init__(output, include, exclude)
self._request_id_to_api_num = {}
self._num_api_calls = 0
self._colorize = colorize
self._value_pformatter = SectionValuePrettyFormatter()
if self._colorize:
colorama.init(autoreset=True, strip=False)
def _display(self, event_record):
section_definition = self._SECTIONS.get(event_record['event_type'])
if section_definition is not None:
self._display_section(event_record, section_definition)
def _display_section(self, event_record, section_definition):
if 'title' in section_definition:
self._display_title(section_definition['title'], event_record)
for value_definition in section_definition['values']:
self._display_value(value_definition, event_record)
def _display_title(self, title, event_record):
formatted_title = self._format_section_title(title, event_record)
self._write_output(formatted_title)
def _display_value(self, value_definition, event_record):
value_description = value_definition['description']
event_record_payload = event_record['payload']
value = event_record_payload
if 'payload_key' in value_definition:
value = event_record_payload[value_definition['payload_key']]
formatted_value = self._format_description(value_description)
formatted_value += self._format_value(
value, event_record, value_definition.get('value_format')
)
if 'filters' in value_definition:
for text_filter in value_definition['filters']:
formatted_value = text_filter.filter_text(formatted_value)
self._write_output(formatted_value)
def _write_output(self, content):
if isinstance(content, six.text_type):
content = content.encode('utf-8')
self._output.write(content)
def _format_section_title(self, title, event_record):
formatted_title = title
api_num = self._get_api_num(event_record)
if api_num is not None:
formatted_title = ('[%s] ' % api_num) + formatted_title
formatted_title = self._color_if_configured(formatted_title, 'title')
formatted_title += '\n'
formatted_timestamp = self._format_description('at time')
formatted_timestamp += self._format_value(
event_record['timestamp'], event_record, value_format='timestamp')
return '\n' + formatted_title + formatted_timestamp
def _get_api_num(self, event_record):
request_id = event_record['request_id']
if request_id:
if request_id not in self._request_id_to_api_num:
self._request_id_to_api_num[
request_id] = self._num_api_calls
self._num_api_calls += 1
return self._request_id_to_api_num[request_id]
def _format_description(self, value_description):
return self._color_if_configured(
value_description + ': ', 'description')
def _format_value(self, value, event_record, value_format=None):
if value_format:
formatted_value = self._value_pformatter.pformat(
value, value_format, event_record)
else:
formatted_value = str(value)
return formatted_value + '\n'
def _color_if_configured(self, text, component):
if self._colorize:
color = self._COMPONENT_COLORS[component]
return color + text + colorama.Style.RESET_ALL
return text
class SectionValuePrettyFormatter(object):
def pformat(self, value, value_format, event_record):
return getattr(self, '_pformat_' + value_format)(value, event_record)
def _pformat_timestamp(self, event_timestamp, event_record=None):
return datetime.datetime.fromtimestamp(
event_timestamp/1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
def _pformat_dictionary(self, obj, event_record=None):
return json.dumps(obj=obj, sort_keys=True, indent=4)
def _pformat_http_body(self, body, event_record):
if not body:
return 'There is no associated body'
elif event_record['payload'].get('streaming', False):
return 'The body is a stream and will not be displayed'
elif self._is_xml(body):
return self._get_pretty_xml(body)
elif self._is_json_structure(body):
return self._get_pretty_json(body)
else:
return body
def _get_pretty_xml(self, body):
stripped_body = self._strip_whitespace(body)
xml_dom = xml.dom.minidom.parseString(stripped_body)
return xml_dom.toprettyxml(indent=' '*4, newl='\n')
def _get_pretty_json(self, body):
obj = json.loads(body)
return self._pformat_dictionary(obj)
def _is_xml(self, body):
try:
xml.dom.minidom.parseString(body)
except xml.parsers.expat.ExpatError:
return False
return True
def _strip_whitespace(self, xml_string):
xml_dom = xml.dom.minidom.parseString(xml_string)
return ''.join(
[line.strip() for line in xml_dom.toxml().splitlines()]
)
def _is_json_structure(self, body):
if body.startswith('{'):
try:
json.loads(body)
return True
except json.decoder.JSONDecodeError:
return False
return False
class ShowCommand(HistorySubcommand):
NAME = 'show'
DESCRIPTION = (
'Shows the various events related to running a specific CLI command. '
        'If this command is run without any positional arguments, it will '
'display the events for the last CLI command ran.'
)
FORMATTERS = {
'detailed': DetailedFormatter
}
ARG_TABLE = [
{'name': 'command_id', 'nargs': '?', 'default': 'latest',
'positional_arg': True,
'help_text': (
'The ID of the CLI command to show. If this positional argument '
             'is omitted, it will show the last CLI command ran.')},
{'name': 'include', 'nargs': '+',
'help_text': (
'Specifies which events to **only** include when showing the '
'CLI command. This argument is mutually exclusive with '
'``--exclude``.')},
{'name': 'exclude', 'nargs': '+',
'help_text': (
'Specifies which events to exclude when showing the '
'CLI command. This argument is mutually exclusive with '
'``--include``.')},
{'name': 'format', 'choices': FORMATTERS.keys(),
'default': 'detailed', 'help_text': (
'Specifies which format to use in showing the events for '
'the specified CLI command. The following formats are '
'supported:\n\n'
'<ul>'
            '<li> detailed - This is the default format. It prints out a '
'detailed overview of the CLI command ran. It displays all '
'of the key events in the command lifecycle where each '
'important event has a title and its important values '
'underneath. The events are ordered by timestamp and events of '
'the same API call are associated together with the '
'[``api_id``] notation where events that share the same '
'``api_id`` belong to the lifecycle of the same API call.'
'</li>'
'</ul>'
)
}
]
def _run_main(self, parsed_args, parsed_globals):
self._connect_to_history_db()
try:
self._validate_args(parsed_args)
with self._get_output_stream() as output_stream:
formatter = self._get_formatter(
parsed_args, parsed_globals, output_stream)
for record in self._get_record_iterator(parsed_args):
formatter.display(record)
finally:
self._close_history_db()
return 0
def _validate_args(self, parsed_args):
if parsed_args.exclude and parsed_args.include:
raise ValueError(
'Either --exclude or --include can be provided but not both')
def _get_formatter(self, parsed_args, parsed_globals, output_stream):
format_type = parsed_args.format
formatter_kwargs = {
'include': parsed_args.include,
'exclude': parsed_args.exclude,
'output': output_stream
}
if format_type == 'detailed':
formatter_kwargs['colorize'] = self._should_use_color(
parsed_globals)
return self.FORMATTERS[format_type](**formatter_kwargs)
def _get_record_iterator(self, parsed_args):
if parsed_args.command_id == 'latest':
return self._db_reader.iter_latest_records()
else:
return self._db_reader.iter_records(parsed_args.command_id)
| true
| true
|
f70159d65a5bfff918de93d5a0e04e7f23f300b0
| 1,711
|
py
|
Python
|
Python/venv/lib/python3.7/site-packages/prometheus_client/platform_collector.py
|
HenriqueBuzin/TCC
|
5fb9db42e97e28131bff97da3252a9ee33b3684e
|
[
"Unlicense"
] | 69
|
2019-02-18T12:07:35.000Z
|
2022-03-12T10:38:32.000Z
|
Python/venv/lib/python3.7/site-packages/prometheus_client/platform_collector.py
|
HenriqueBuzin/TCC
|
5fb9db42e97e28131bff97da3252a9ee33b3684e
|
[
"Unlicense"
] | 12
|
2018-12-06T22:06:49.000Z
|
2022-02-25T17:40:44.000Z
|
Python/venv/lib/python3.7/site-packages/prometheus_client/platform_collector.py
|
HenriqueBuzin/TCC
|
5fb9db42e97e28131bff97da3252a9ee33b3684e
|
[
"Unlicense"
] | 28
|
2019-03-22T01:07:13.000Z
|
2022-02-21T16:38:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import platform as pf
from . import core
class PlatformCollector(object):
"""Collector for python platform information"""
def __init__(self, registry=core.REGISTRY, platform=None):
self._platform = pf if platform is None else platform
info = self._info()
system = self._platform.system()
if system == "Java":
info.update(self._java())
self._metrics = [
self._add_metric("python_info", "Python platform information", info)
]
if registry:
registry.register(self)
def collect(self):
return self._metrics
@staticmethod
def _add_metric(name, documentation, data):
labels = data.keys()
values = [data[k] for k in labels]
g = core.GaugeMetricFamily(name, documentation, labels=labels)
g.add_metric(values, 1)
return g
def _info(self):
major, minor, patchlevel = self._platform.python_version_tuple()
return {
"version": self._platform.python_version(),
"implementation": self._platform.python_implementation(),
"major": major,
"minor": minor,
"patchlevel": patchlevel
}
def _java(self):
java_version, _, vminfo, osinfo = self._platform.java_ver()
vm_name, vm_release, vm_vendor = vminfo
return {
"jvm_version": java_version,
"jvm_release": vm_release,
"jvm_vendor": vm_vendor,
"jvm_name": vm_name
}
PLATFORM_COLLECTOR = PlatformCollector()
"""PlatformCollector in default Registry REGISTRY"""
| 29
| 80
| 0.613676
|
from __future__ import unicode_literals
import platform as pf
from . import core
class PlatformCollector(object):
def __init__(self, registry=core.REGISTRY, platform=None):
self._platform = pf if platform is None else platform
info = self._info()
system = self._platform.system()
if system == "Java":
info.update(self._java())
self._metrics = [
self._add_metric("python_info", "Python platform information", info)
]
if registry:
registry.register(self)
def collect(self):
return self._metrics
@staticmethod
def _add_metric(name, documentation, data):
labels = data.keys()
values = [data[k] for k in labels]
g = core.GaugeMetricFamily(name, documentation, labels=labels)
g.add_metric(values, 1)
return g
def _info(self):
major, minor, patchlevel = self._platform.python_version_tuple()
return {
"version": self._platform.python_version(),
"implementation": self._platform.python_implementation(),
"major": major,
"minor": minor,
"patchlevel": patchlevel
}
def _java(self):
java_version, _, vminfo, osinfo = self._platform.java_ver()
vm_name, vm_release, vm_vendor = vminfo
return {
"jvm_version": java_version,
"jvm_release": vm_release,
"jvm_vendor": vm_vendor,
"jvm_name": vm_name
}
PLATFORM_COLLECTOR = PlatformCollector()
| true
| true
|
f70159fa731ebd5131cfc64b025fdad8ba885564
| 222
|
py
|
Python
|
client codes/client v2/node_free.py
|
nathaniel-security/Localhost-distributed-computing-engine
|
4643c22f563a969ccaf1062da17696819e00ab9a
|
[
"MIT"
] | null | null | null |
client codes/client v2/node_free.py
|
nathaniel-security/Localhost-distributed-computing-engine
|
4643c22f563a969ccaf1062da17696819e00ab9a
|
[
"MIT"
] | null | null | null |
client codes/client v2/node_free.py
|
nathaniel-security/Localhost-distributed-computing-engine
|
4643c22f563a969ccaf1062da17696819e00ab9a
|
[
"MIT"
] | null | null | null |
from client_database_connection import mycursor
import os
sql = "INSERT INTO free_node (node_id) VALUES (%s)"
val = (node_id,)
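# NOTE: node_id is assumed to be defined (or imported) elsewhere before this
# script runs; the original file references it without defining it. The
# trailing comma above makes val a one-element tuple, as mycursor.execute
# expects a parameter sequence. Depending on how client_database_connection
# configures the connection, the INSERT may also need an explicit commit().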
mycursor.execute(sql, val)
command = 'python get_code_when_free.py'
os.system(command)
| 24.666667
| 52
| 0.752252
|
from client_database_connection import mycursor
import os
sql = "INSERT INTO free_node (node_id) VALUES (%s)"
val = (node_id,)
mycursor.execute(sql, val)
command = 'python get_code_when_free.py'
os.system(command)
| true
| true
|
f7015a05765804fec4cfa4d48e357987ad3578a0
| 114,236
|
py
|
Python
|
research/object_detection/meta_architectures/faster_rcnn_meta_arch_override_RPN.py
|
AXATechLab/models
|
c39ac760cfa6ce2339f5781f2a78d70db3ea5bb2
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/meta_architectures/faster_rcnn_meta_arch_override_RPN.py
|
AXATechLab/models
|
c39ac760cfa6ce2339f5781f2a78d70db3ea5bb2
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/meta_architectures/faster_rcnn_meta_arch_override_RPN.py
|
AXATechLab/models
|
c39ac760cfa6ce2339f5781f2a78d70db3ea5bb2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster R-CNN meta-architecture definition.
General tensorflow implementation of Faster R-CNN detection models.
See Faster R-CNN: Ren, Shaoqing, et al.
"Faster R-CNN: Towards real-time object detection with region proposal
networks." Advances in neural information processing systems. 2015.
We allow for three modes: number_of_stages={1, 2, 3}. In case of 1 stage,
all of the user facing methods (e.g., predict, postprocess, loss) can be used as
if the model consisted only of the RPN, returning class agnostic proposals
(these can be thought of as approximate detections with no associated class
information). In case of 2 stages, proposals are computed, then passed
through a second stage "box classifier" to yield (multi-class) detections.
Finally, in the case of 3 stages, which is only used during eval, proposals are
computed, then passed through a second stage "box classifier" that computes
refined boxes and classes, and then features are pooled from the refined,
non-maximum suppressed boxes and passed through the box classifier again. If
number_of_stages is 3 during training, it is reduced to two automatically.
Implementations of Faster R-CNN models must define a new
FasterRCNNFeatureExtractor and override three methods: `preprocess`,
`_extract_proposal_features` (the first stage of the model), and
`_extract_box_classifier_features` (the second stage of the model). Optionally,
the `restore_fn` method can be overridden. See tests for an example.
A few important notes:
+ Batching conventions: We support batched inference and training where
all images within a batch have the same resolution. Batch sizes are determined
dynamically via the shape of the input tensors (rather than being specified
directly as, e.g., a model constructor).
A complication is that due to non-max suppression, we are not guaranteed to get
the same number of proposals from the first stage RPN (region proposal network)
for each image (though in practice, we should often get the same number of
proposals). For this reason we pad to a max number of proposals per image
within a batch. This `self.max_num_proposals` property is set to the
`first_stage_max_proposals` parameter at inference time and the
`second_stage_batch_size` at training time since we subsample the batch to
be sent through the box classifier during training.
For the second stage of the pipeline, we arrange the proposals for all images
within the batch along a single batch dimension. For example, the input to
_extract_box_classifier_features is a tensor of shape
`[total_num_proposals, crop_height, crop_width, depth]` where
total_num_proposals is batch_size * self.max_num_proposals. (And note that per
the above comment, a subset of these entries correspond to zero paddings.)
+ Coordinate representations:
Following the API (see model.DetectionModel definition), our outputs after
postprocessing operations are always normalized boxes; however, internally, we
sometimes convert to absolute coordinates --- e.g., for loss computation. In
particular, anchors and proposal_boxes are both represented in absolute
coordinates.
Images are resized in the `preprocess` method.
The Faster R-CNN meta architecture has two post-processing methods
`_postprocess_rpn` which is applied after first stage and
`_postprocess_box_classifier` which is applied after second stage. There are
three different ways post-processing can happen depending on number_of_stages
configured in the meta architecture:
1. When number_of_stages is 1:
`_postprocess_rpn` is run as part of the `postprocess` method where
true_image_shapes is used to clip proposals, perform non-max suppression and
normalize them.
2. When number of stages is 2:
`_postprocess_rpn` is run as part of the `_predict_second_stage` method where
`resized_image_shapes` is used to clip proposals, perform non-max suppression
and normalize them. In this case `postprocess` method skips `_postprocess_rpn`
and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip
detections, perform non-max suppression and normalize them.
3. When number of stages is 3:
`_postprocess_rpn` is run as part of the `_predict_second_stage` using
`resized_image_shapes` to clip proposals, perform non-max suppression and
normalize them. Subsequently, `_postprocess_box_classifier` is run as part of
`_predict_third_stage` using `true_image_shapes` to clip detections, perform
non-max suppression and normalize them. In this case, the `postprocess` method
skips both `_postprocess_rpn` and `_postprocess_box_classifier`.
"""
from abc import abstractmethod
from functools import partial
import tensorflow as tf
import json
import os
import numpy as np
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.builders import box_predictor_builder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import box_predictor
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import post_processing
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.utils import ops
from object_detection.utils import shape_utils
import sys # for debug
sys.path.append("/notebooks/text-renderer/")
import data_util
slim = tf.contrib.slim
class FasterRCNNFeatureExtractor(object):
"""Faster R-CNN Feature Extractor definition."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
first_stage_features_stride: Output stride of extracted RPN feature map.
batch_norm_trainable: Whether to update batch norm parameters during
training or not. When training with a relative large batch size
(e.g. 8), it could be desirable to enable batch norm update.
reuse_weights: Whether to reuse variables. Default is None.
weight_decay: float weight decay for feature extractor (default: 0.0).
"""
self._is_training = is_training
self._first_stage_features_stride = first_stage_features_stride
self._train_batch_norm = (batch_norm_trainable and is_training)
self._reuse_weights = reuse_weights
self._weight_decay = weight_decay
@abstractmethod
def preprocess(self, resized_inputs):
"""Feature-extractor specific preprocessing (minus image resizing)."""
pass
def extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
This function is responsible for extracting feature maps from preprocessed
images. These features are used by the region proposal network (RPN) to
predict proposals.
Args:
preprocessed_inputs: A [batch, height, width, channels] float tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping activation tensor names to tensors.
"""
with tf.variable_scope(scope, values=[preprocessed_inputs]):
return self._extract_proposal_features(preprocessed_inputs, scope)
@abstractmethod
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features, to be overridden."""
pass
def extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name.
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
with tf.variable_scope(
scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE):
return self._extract_box_classifier_features(proposal_feature_maps, scope)
@abstractmethod
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features, to be overridden."""
pass
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in tf.global_variables():
for scope_name in [first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope]:
if variable.op.name.startswith(scope_name):
var_name = variable.op.name.replace(scope_name + '/', '')
variables_to_restore[var_name] = variable
return variables_to_restore
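# Illustrative sketch (hypothetical variable name): a graph variable such as
# 'FirstStageFeatureExtractor/resnet_v1_101/conv1/weights' is keyed in the
# returned map as 'resnet_v1_101/conv1/weights', i.e. the meta-architecture
# scope prefix is stripped so the variable lines up with a classification
# checkpoint trained outside this model.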
class FasterRCNNMetaArchOverrideRPN(model.DetectionModel):
"""Faster R-CNN Meta-architecture definition."""
def __init__(self,
is_training,
num_classes,
image_resizer_fn,
feature_extractor,
number_of_stages,
first_stage_anchor_generator,
first_stage_target_assigner,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope_fn,
first_stage_box_predictor_kernel_size,
first_stage_box_predictor_depth,
first_stage_minibatch_size,
first_stage_sampler,
first_stage_nms_score_threshold,
first_stage_nms_iou_threshold,
first_stage_max_proposals,
first_stage_proposals_path,
first_stage_localization_loss_weight,
first_stage_objectness_loss_weight,
initial_crop_size,
maxpool_kernel_size,
maxpool_stride,
second_stage_target_assigner,
second_stage_mask_rcnn_box_predictor,
second_stage_batch_size,
second_stage_sampler,
second_stage_non_max_suppression_fn,
second_stage_score_conversion_fn,
second_stage_localization_loss_weight,
second_stage_classification_loss_weight,
second_stage_classification_loss,
second_stage_mask_prediction_loss_weight=1.0,
hard_example_miner=None,
parallel_iterations=16,
add_summaries=True,
use_matmul_crop_and_resize=False,
clip_anchors_to_image=False):
"""FasterRCNNMetaArch Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
num_classes: Number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
image_resizer_fn: A callable for image resizing. This callable
takes a rank-3 image tensor of shape [height, width, channels]
(corresponding to a single image), an optional rank-3 instance mask
tensor of shape [num_masks, height, width] and returns a resized rank-3
image tensor, a resized mask tensor if one was provided in the input. In
addition this callable must also return a 1-D tensor of the form
[height, width, channels] containing the size of the true image, as the
image resizer can perform zero padding. See protos/image_resizer.proto.
feature_extractor: A FasterRCNNFeatureExtractor object.
number_of_stages: An integer taking values in {1, 2, 3}. If
1, the function will construct only the Region Proposal Network (RPN)
part of the model. If 2, the function will perform box refinement and
other auxiliary predictions all in the second stage. If 3, it will
extract features from refined boxes and perform the auxiliary
predictions on the non-maximum suppressed refined boxes.
If is_training is true and the value of number_of_stages is 3, it is
reduced to 2 since all the model heads are trained in parallel in second
stage during training.
first_stage_anchor_generator: An anchor_generator.AnchorGenerator object
(note that currently we only support
grid_anchor_generator.GridAnchorGenerator objects)
first_stage_target_assigner: Target assigner to use for first stage of
Faster R-CNN (RPN).
first_stage_atrous_rate: A single integer indicating the atrous rate for
the single convolution op which is applied to the `rpn_features_to_crop`
tensor to obtain a tensor to be used for box prediction. Some feature
extractors optionally allow for producing feature maps computed at
denser resolutions. The atrous rate is used to compensate for the
denser feature maps by using an effectively larger receptive field.
(This should typically be set to 1).
first_stage_box_predictor_arg_scope_fn: A function to construct tf-slim
arg_scope for conv2d, separable_conv2d and fully_connected ops for the
RPN box predictor.
first_stage_box_predictor_kernel_size: Kernel size to use for the
convolution op just prior to RPN box predictions.
first_stage_box_predictor_depth: Output depth for the convolution op
just prior to RPN box predictions.
first_stage_minibatch_size: The "batch size" to use for computing the
objectness and location loss of the region proposal network. This
"batch size" refers to the number of anchors selected as contributing
to the loss function for any given image within the image batch and is
only called "batch_size" due to terminology from the Faster R-CNN paper.
first_stage_sampler: Sampler to use for first stage loss (RPN loss).
first_stage_nms_score_threshold: Score threshold for non max suppression
for the Region Proposal Network (RPN). This value is expected to be in
[0, 1] as it is applied directly after a softmax transformation. The
recommended value for Faster R-CNN is 0.
first_stage_nms_iou_threshold: The Intersection Over Union (IOU) threshold
for performing Non-Max Suppression (NMS) on the boxes predicted by the
Region Proposal Network (RPN).
first_stage_max_proposals: Maximum number of boxes to retain after
performing Non-Max Suppression (NMS) on the boxes predicted by the
Region Proposal Network (RPN).
first_stage_localization_loss_weight: A float
first_stage_objectness_loss_weight: A float
initial_crop_size: A single integer indicating the output size
(width and height are set to be the same) of the initial bilinear
interpolation based cropping during ROI pooling.
maxpool_kernel_size: A single integer indicating the kernel size of the
max pool op on the cropped feature map during ROI pooling.
maxpool_stride: A single integer indicating the stride of the max pool
op on the cropped feature map during ROI pooling.
second_stage_target_assigner: Target assigner to use for second stage of
Faster R-CNN. If the model is configured with multiple prediction heads,
this target assigner is used to generate targets for all heads (with the
correct `unmatched_class_label`).
second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for
the second stage.
second_stage_batch_size: The batch size used for computing the
classification and refined location loss of the box classifier. This
"batch size" refers to the number of proposals selected as contributing
to the loss function for any given image within the image batch and is
only called "batch_size" due to terminology from the Faster R-CNN paper.
second_stage_sampler: Sampler to use for second stage loss (box
classifier loss).
second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores`, optional `clip_window` and
optional (kwarg) `mask` inputs (with all other inputs already set)
and returns a dictionary containing tensors with keys:
`detection_boxes`, `detection_scores`, `detection_classes`,
`num_detections`, and (optionally) `detection_masks`. See
`post_processing.batch_multiclass_non_max_suppression` for the type and
shape of these tensors.
second_stage_score_conversion_fn: Callable elementwise nonlinearity
(that takes tensors as inputs and returns tensors). This is usually
used to convert logits to probabilities.
second_stage_localization_loss_weight: A float indicating the scale factor
for second stage localization loss.
second_stage_classification_loss_weight: A float indicating the scale
factor for second stage classification loss.
second_stage_classification_loss: Classification loss used by the second
stage classifier. Either losses.WeightedSigmoidClassificationLoss or
losses.WeightedSoftmaxClassificationLoss.
second_stage_mask_prediction_loss_weight: A float indicating the scale
factor for second stage mask prediction loss. This is applicable only if
second stage box predictor is configured to predict masks.
hard_example_miner: A losses.HardExampleMiner object (can be None).
parallel_iterations: (Optional) The number of iterations allowed to run
in parallel for calls to tf.map_fn.
add_summaries: boolean (default: True) controlling whether summary ops
should be added to tensorflow graph.
use_matmul_crop_and_resize: Force the use of matrix multiplication based
crop and resize instead of standard tf.image.crop_and_resize while
computing second stage input feature maps.
clip_anchors_to_image: Normally, anchors generated for a given image size
are pruned during training if they lie outside the image window. This
option clips the anchors to be within the image instead of pruning.
Raises:
ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at
training time.
ValueError: If first_stage_anchor_generator is not of type
grid_anchor_generator.GridAnchorGenerator.
"""
# TODO(rathodv): add_summaries is currently unused. Respect that directive
# in the future.
print("Running FasterRCNN with overriden RPN")
super(FasterRCNNMetaArchOverrideRPN, self).__init__(num_classes=num_classes)
# There is no RPN in this implementation!
if number_of_stages == 1:
raise ValueError('Number of stages = 1 is not allowed for overridden RPN proposals')
if is_training and second_stage_batch_size > first_stage_max_proposals:
raise ValueError('second_stage_batch_size should be no greater than '
'first_stage_max_proposals.')
if not isinstance(first_stage_anchor_generator,
grid_anchor_generator.GridAnchorGenerator):
raise ValueError('first_stage_anchor_generator must be of type '
'grid_anchor_generator.GridAnchorGenerator.')
# Michele: Proposals that override the RPN
first_stage_proposals_path = os.path.join(first_stage_proposals_path, '')
xml_root = data_util.read_xml_batch(first_stage_proposals_path)[0]['annot']
_, self.proposals = data_util.xml_to_numpy(None, xml_root)
print("Shape of overriding proposals",self.proposals.shape)
self._is_training = is_training
self._image_resizer_fn = image_resizer_fn
self._feature_extractor = feature_extractor
self._number_of_stages = number_of_stages
self._proposal_target_assigner = first_stage_target_assigner
self._detector_target_assigner = second_stage_target_assigner
# Both proposal and detector target assigners use the same box coder
self._box_coder = self._proposal_target_assigner.box_coder
# (First stage) Region proposal network parameters
self._first_stage_anchor_generator = first_stage_anchor_generator
self._first_stage_atrous_rate = first_stage_atrous_rate
self._first_stage_box_predictor_arg_scope_fn = (
first_stage_box_predictor_arg_scope_fn)
self._first_stage_box_predictor_kernel_size = (
first_stage_box_predictor_kernel_size)
self._first_stage_box_predictor_depth = first_stage_box_predictor_depth
self._first_stage_minibatch_size = first_stage_minibatch_size
self._first_stage_sampler = first_stage_sampler
self._first_stage_box_predictor = (
box_predictor_builder.build_convolutional_box_predictor(
is_training=self._is_training,
num_classes=1,
conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn,
use_dropout=False,
dropout_keep_prob=1.0,
box_code_size=self._box_coder.code_size,
kernel_size=1,
num_layers_before_predictor=0,
min_depth=0,
max_depth=0))
self._first_stage_nms_score_threshold = first_stage_nms_score_threshold
self._first_stage_nms_iou_threshold = first_stage_nms_iou_threshold
self._first_stage_max_proposals = first_stage_max_proposals
self._first_stage_localization_loss = (
losses.WeightedSmoothL1LocalizationLoss())
self._first_stage_objectness_loss = (
losses.WeightedSoftmaxClassificationLoss())
self._first_stage_loc_loss_weight = first_stage_localization_loss_weight
self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight
# Per-region cropping parameters
self._initial_crop_size = initial_crop_size
self._maxpool_kernel_size = maxpool_kernel_size
self._maxpool_stride = maxpool_stride
self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor
self._second_stage_batch_size = second_stage_batch_size
self._second_stage_sampler = second_stage_sampler
self._second_stage_nms_fn = second_stage_non_max_suppression_fn
self._second_stage_score_conversion_fn = second_stage_score_conversion_fn
self._second_stage_localization_loss = (
losses.WeightedSmoothL1LocalizationLoss())
self._second_stage_classification_loss = second_stage_classification_loss
self._second_stage_mask_loss = (
losses.WeightedSigmoidClassificationLoss())
self._second_stage_loc_loss_weight = second_stage_localization_loss_weight
self._second_stage_cls_loss_weight = second_stage_classification_loss_weight
self._second_stage_mask_loss_weight = (
second_stage_mask_prediction_loss_weight)
self._use_matmul_crop_and_resize = use_matmul_crop_and_resize
self._hard_example_miner = hard_example_miner
self._parallel_iterations = parallel_iterations
self.clip_anchors_to_image = clip_anchors_to_image
if self._number_of_stages <= 0 or self._number_of_stages > 3:
raise ValueError('Number of stages should be a value in {1, 2, 3}.')
@property
def first_stage_feature_extractor_scope(self):
return 'FirstStageFeatureExtractor'
@property
def second_stage_feature_extractor_scope(self):
return 'SecondStageFeatureExtractor'
@property
def first_stage_box_predictor_scope(self):
return 'FirstStageBoxPredictor'
@property
def second_stage_box_predictor_scope(self):
return 'SecondStageBoxPredictor'
@property
def max_num_proposals(self):
"""Max number of proposals (to pad to) for each image in the input batch.
At training time, this is set to be the `second_stage_batch_size` if hard
example miner is not configured, else it is set to
`first_stage_max_proposals`. At inference time, this is always set to
`first_stage_max_proposals`.
Returns:
A positive integer.
"""
if self._is_training and not self._hard_example_miner:
return self._second_stage_batch_size
#return self._first_stage_max_proposals
return self.proposals.shape[1]
@property
def anchors(self):
if not self._anchors:
raise RuntimeError('anchors have not been constructed yet!')
if not isinstance(self._anchors, box_list.BoxList):
raise RuntimeError('anchors should be a BoxList object, but is not.')
return self._anchors
def preprocess(self, inputs):
"""Feature-extractor specific preprocessing.
See base class.
For Faster R-CNN, we perform image resizing in the base class --- each
class subclassing FasterRCNNMetaArch is responsible for any additional
preprocessing (e.g., scaling pixel values to be in [-1, 1]).
Args:
inputs: a [batch, height_in, width_in, channels] float tensor representing
a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float
tensor representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Raises:
ValueError: if inputs tensor does not have type tf.float32
"""
if inputs.dtype is not tf.float32:
raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
outputs = shape_utils.static_or_dynamic_map_fn(
self._image_resizer_fn,
elems=inputs,
dtype=[tf.float32, tf.int32],
parallel_iterations=self._parallel_iterations)
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def _compute_clip_window(self, image_shapes):
"""Computes clip window for non max suppression based on image shapes.
This function assumes that the clip window's left top corner is at (0, 0).
Args:
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing
shapes of images in the batch. Each row represents [height, width,
channels] of an image.
Returns:
A 2-D float32 tensor of shape [batch_size, 4] containing the clip window
for each image in the form [ymin, xmin, ymax, xmax].
"""
clip_heights = image_shapes[:, 0]
clip_widths = image_shapes[:, 1]
clip_window = tf.to_float(tf.stack([tf.zeros_like(clip_heights),
tf.zeros_like(clip_heights),
clip_heights, clip_widths], axis=1))
return clip_window
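# Illustrative sketch (hypothetical shapes): for a batch of two images with
# true shapes [[600, 800, 3], [480, 640, 3]], the stacked clip windows are
# [[0., 0., 600., 800.], [0., 0., 480., 640.]], i.e. each window spans the
# true (unpadded) extent of its image starting at the top-left corner.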
def predict(self, preprocessed_inputs, true_image_shapes):
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the
forward pass of the network to yield "raw" un-postprocessed predictions.
If `number_of_stages` is 1, this function only returns first stage
RPN predictions (un-postprocessed). Otherwise it returns both
first stage RPN predictions as well as second stage box classifier
predictions.
Other remarks:
+ Anchor pruning vs. clipping: following the recommendation of the Faster
R-CNN paper, we prune anchors that venture outside the image window at
training time and clip anchors to the image window at inference time.
+ Proposal padding: as described at the top of the file, proposals are
padded to self._max_num_proposals and flattened so that proposals from all
images within the input batch are arranged along the same batch dimension.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) rpn_box_predictor_features: A 4-D float32 tensor with shape
[batch_size, height, width, depth] to be used for predicting proposal
boxes and corresponding objectness scores.
2) rpn_features_to_crop: A 4-D float32 tensor with shape
[batch_size, height, width, depth] representing image features to crop
using the proposal boxes predicted by the RPN.
3) image_shape: a 1-D tensor of shape [4] representing the input
image shape.
4) rpn_box_encodings: 3-D float tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted boxes.
5) rpn_objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
for the first stage RPN (in absolute coordinates). Note that
`num_anchors` can differ depending on whether the model is created in
training or inference mode.
(and if number_of_stages > 1):
7) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using
a shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
8) class_predictions_with_background: a 3-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
9) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
10) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
11) mask_predictions: (optional) a 4-D tensor with shape
[total_num_padded_proposals, num_classes, mask_height, mask_width]
containing instance mask predictions.
Raises:
ValueError: If `predict` is called before `preprocess`.
"""
'''(rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist,
image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)'''
print("Predict running")
image_shape = tf.shape(preprocessed_inputs)
rpn_features_to_crop, _ = self._feature_extractor.extract_proposal_features(
preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)
#(rpn_box_encodings, rpn_objectness_predictions_with_background
#) = self._predict_rpn_proposals(rpn_box_predictor_features)
# The Faster R-CNN paper recommends pruning anchors that venture outside
# the image window at training time and clipping at inference time.
'''clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))
if self._is_training:
if self.clip_anchors_to_image:
anchors_boxlist = box_list_ops.clip_to_window(
anchors_boxlist, clip_window, filter_nonoverlapping=False)
else:
(rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors_boxlist) = self._remove_invalid_anchors_and_predictions(
rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors_boxlist, clip_window)
else:
anchors_boxlist = box_list_ops.clip_to_window(
anchors_boxlist, clip_window)
self._anchors = anchors_boxlist'''
prediction_dict = {
#'rpn_box_predictor_features': rpn_box_predictor_features,
'rpn_features_to_crop': rpn_features_to_crop,
'image_shape': image_shape,
#'rpn_box_encodings': rpn_box_encodings,
#'rpn_objectness_predictions_with_background':
#rpn_objectness_predictions_with_background,
#'anchors': self._anchors.get()
}
if self._number_of_stages >= 2:
'''prediction_dict.update(self._predict_second_stage(
rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop,
self._anchors.get(), image_shape, true_image_shapes))'''
prediction_dict.update(self._predict_second_stage(
rpn_features_to_crop, image_shape, true_image_shapes))
if self._number_of_stages == 3:
prediction_dict = self._predict_third_stage(
prediction_dict, true_image_shapes)
return prediction_dict
def _image_batch_shape_2d(self, image_batch_shape_1d):
"""Takes a 1-D image batch shape tensor and converts it to a 2-D tensor.
Example:
If 1-D image batch shape tensor is [2, 300, 300, 3]. The corresponding 2-D
image batch tensor would be [[300, 300, 3], [300, 300, 3]]
Args:
image_batch_shape_1d: 1-D tensor of the form [batch_size, height,
width, channels].
Returns:
image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] where each row is
of the form [height, width, channels].
"""
return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0),
[image_batch_shape_1d[0], 1])
'''def _predict_second_stage(self, rpn_box_encodings,
rpn_objectness_predictions_with_background,
rpn_features_to_crop,
anchors,
image_shape,
true_image_shapes):
"""Predicts the output tensors from second stage of Faster R-CNN.
Args:
rpn_box_encodings: 4-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes.
rpn_objectness_predictions_with_background: 2-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
rpn_features_to_crop: A 4-D float32 tensor with shape
[batch_size, height, width, depth] representing image features to crop
using the proposal boxes predicted by the RPN.
anchors: 2-D float tensor of shape
[num_anchors, self._box_coder.code_size].
image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
5) proposal_boxes_normalized: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes in normalized coordinates. Can be used to override the
boxes proposed by the RPN, thus enabling one to extract features and
get box classification and prediction for externally selected areas
of the image.
6) box_classifier_features: a 4-D float32 tensor representing the
features for each proposal.
"""
image_shape_2d = self._image_batch_shape_2d(image_shape)
proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(
rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors, image_shape_2d, true_image_shapes)
# Override RPN proposals
# proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=("original size= " + str(proposal_boxes_normalized.shape[1])))
# proposal_boxes_normalized = tf.constant(self.proposals, dtype='float32')
flattened_proposal_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, proposal_boxes_normalized))
box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
flattened_proposal_feature_maps,
scope=self.second_stage_feature_extractor_scope))
if self._mask_rcnn_box_predictor.is_keras_model:
box_predictions = self._mask_rcnn_box_predictor(
[box_classifier_features],
prediction_stage=2)
else:
box_predictions = self._mask_rcnn_box_predictor.predict(
[box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=2)
refined_box_encodings = tf.squeeze(
box_predictions[box_predictor.BOX_ENCODINGS],
axis=1, name='all_refined_box_encodings')
class_predictions_with_background = tf.squeeze(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1, name='all_class_predictions_with_background')
absolute_proposal_boxes = ops.normalized_to_image_coordinates(
proposal_boxes_normalized, image_shape, self._parallel_iterations)
prediction_dict = {
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'num_proposals': num_proposals,
'proposal_boxes': absolute_proposal_boxes,
'box_classifier_features': box_classifier_features,
'proposal_boxes_normalized': proposal_boxes_normalized,
}
return prediction_dict'''
def _predict_second_stage(self, rpn_features_to_crop,
image_shape,
true_image_shapes):
"""Predicts the output tensors from second stage of Faster R-CNN.
Args:
rpn_features_to_crop: A 4-D float32 tensor with shape
[batch_size, height, width, depth] representing image features to crop
using the proposal boxes predicted by the RPN.
image_shape: A 1-D int32 tensor of size [4] containing the image shape.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
5) proposal_boxes_normalized: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes in normalized coordinates. Can be used to override the
boxes proposed by the RPN, thus enabling one to extract features and
get box classification and prediction for externally selected areas
of the image.
6) box_classifier_features: a 4-D float32 tensor representing the
features for each proposal.
"""
image_shape_2d = self._image_batch_shape_2d(image_shape) # same as true shape
'''proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(
rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors, image_shape_2d, true_image_shapes)'''
# Override RPN proposals
# proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=("original size= " + str(proposal_boxes_normalized.shape[1])))
# normalize proposal boxes
def normalize_boxes(args):
proposal_boxes_per_image = args[0]
image_shape = args[1]
normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
box_list.BoxList(proposal_boxes_per_image), image_shape[0],
image_shape[1], check_range=False).get()
return normalized_boxes_per_image
def to_absolute_boxes(args):
proposal_boxes_per_image = args[0]
image_shape = args[1]
absolute_boxes_per_image = box_list_ops.to_absolute_coordinates(
box_list.BoxList(proposal_boxes_per_image), image_shape[0],
image_shape[1], check_range=False).get()
return absolute_boxes_per_image
proposal_boxes = tf.constant(self.proposals, dtype='float32')
proposal_boxes = shape_utils.static_or_dynamic_map_fn(
to_absolute_boxes, elems=[proposal_boxes, true_image_shapes], dtype=tf.float32)
num_proposals = tf.constant([proposal_boxes.shape[1]], dtype='int32')
# single_image_boxlist = box_list.BoxList(proposals_absolute)
# proposal_boxes = self._sample_box_classifier_minibatch_single_image(single_image_boxlist, num_proposals, groundtruth_boxlists[0],
# groundtruth_classes_with_background_list[0], groundtruth_weights_list[0]).get()
# Minibatch sampling during training
if self._is_training:
proposal_boxes = tf.stop_gradient(proposal_boxes)
if not self._hard_example_miner:
placeholder_scores = tf.zeros((1, proposal_boxes.shape[1], 2))
#proposal_boxes = tf.Print(proposal_boxes, [proposal_boxes], message="1: ")
(groundtruth_boxlists, groundtruth_classes_with_background_list, _,
groundtruth_weights_list
) = self._format_groundtruth_data(true_image_shapes)
(proposal_boxes, _, num_proposals) = self._sample_box_classifier_batch(proposal_boxes, placeholder_scores, num_proposals,
groundtruth_boxlists, groundtruth_classes_with_background_list, groundtruth_weights_list, true_image_shapes[0])
#proposal_boxes = tf.Print(proposal_boxes, [proposal_boxes], message="2: ")
#proposal_boxes = tf.Print(proposal_boxes, [], message=("Shape of pboxes " + str(proposal_boxes.shape[1])))
#num_proposals = tf.Print(num_proposals, [num_proposals])
proposal_boxes_normalized = shape_utils.static_or_dynamic_map_fn(
normalize_boxes, elems=[proposal_boxes, true_image_shapes], dtype=tf.float32)
#proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [proposal_boxes_normalized], message="3: ")
#proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [tf.shape(proposal_boxes_normalized)], message=("Shape of pboxes "))
#proposal_boxes_normalized = tf.constant(self.proposals[:, 0:64, :], dtype='float32')
#proposal_boxes_normalized = tf.Print(proposal_boxes_normalized, [], message=("Shape of minibatch " + str(proposal_boxes_normalized.shape[1])))
flattened_proposal_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, proposal_boxes_normalized))
#flattened_proposal_feature_maps = tf.stop_gradient(flattened_proposal_feature_maps)
#flattened_proposal_feature_maps = tf.Print(flattened_proposal_feature_maps, [], message=("Cropped props : " + str(flattened_proposal_feature_maps.shape)))
box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
flattened_proposal_feature_maps,
scope=self.second_stage_feature_extractor_scope))
if self._mask_rcnn_box_predictor.is_keras_model:
box_predictions = self._mask_rcnn_box_predictor(
[box_classifier_features],
prediction_stage=2)
else:
box_predictions = self._mask_rcnn_box_predictor.predict(
[box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=2)
refined_box_encodings = tf.squeeze(
box_predictions[box_predictor.BOX_ENCODINGS],
axis=1, name='all_refined_box_encodings')
class_predictions_with_background = tf.squeeze(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1, name='all_class_predictions_with_background')
absolute_proposal_boxes = ops.normalized_to_image_coordinates(
proposal_boxes_normalized, image_shape, self._parallel_iterations)
prediction_dict = {
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'num_proposals': num_proposals,
'proposal_boxes': absolute_proposal_boxes,
'box_classifier_features': box_classifier_features,
'proposal_boxes_normalized': proposal_boxes_normalized,
}
return prediction_dict
def _predict_third_stage(self, prediction_dict, image_shapes):
"""Predicts non-box, non-class outputs using refined detections.
For training, masks are predicted directly on the box_classifier_features,
which are region-features from the initial anchor boxes.
For inference, this happens after calling the post-processing stage, such
that masks are only calculated for the top scored boxes.
Args:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
5) box_classifier_features: a 4-D float32 tensor representing the
features for each proposal.
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing
shapes of images in the batch.
Returns:
prediction_dict: a dictionary that in addition to the input predictions
does hold the following predictions as well:
1) mask_predictions: a 4-D tensor with shape
[batch_size, max_detection, mask_height, mask_width] containing
instance mask predictions.
"""
if self._is_training:
curr_box_classifier_features = prediction_dict['box_classifier_features']
detection_classes = prediction_dict['class_predictions_with_background']
if self._mask_rcnn_box_predictor.is_keras_model:
mask_predictions = self._mask_rcnn_box_predictor(
[curr_box_classifier_features],
prediction_stage=3)
else:
mask_predictions = self._mask_rcnn_box_predictor.predict(
[curr_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=3)
prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
else:
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
image_shapes)
prediction_dict.update(detections_dict)
detection_boxes = detections_dict[
fields.DetectionResultFields.detection_boxes]
detection_classes = detections_dict[
fields.DetectionResultFields.detection_classes]
rpn_features_to_crop = prediction_dict['rpn_features_to_crop']
batch_size = tf.shape(detection_boxes)[0]
max_detection = tf.shape(detection_boxes)[1]
flattened_detected_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, detection_boxes))
curr_box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
flattened_detected_feature_maps,
scope=self.second_stage_feature_extractor_scope))
if self._mask_rcnn_box_predictor.is_keras_model:
mask_predictions = self._mask_rcnn_box_predictor(
[curr_box_classifier_features],
prediction_stage=3)
else:
mask_predictions = self._mask_rcnn_box_predictor.predict(
[curr_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=3)
detection_masks = tf.squeeze(mask_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
_, num_classes, mask_height, mask_width = (
detection_masks.get_shape().as_list())
_, max_detection = detection_classes.get_shape().as_list()
if num_classes > 1:
detection_masks = self._gather_instance_masks(
detection_masks, detection_classes)
prediction_dict[fields.DetectionResultFields.detection_masks] = (
tf.reshape(detection_masks,
[batch_size, max_detection, mask_height, mask_width]))
return prediction_dict
def _gather_instance_masks(self, instance_masks, classes):
"""Gathers the masks that correspond to classes.
Args:
instance_masks: A 4-D float32 tensor with shape
[K, num_classes, mask_height, mask_width].
classes: A 2-D int32 tensor with shape [batch_size, max_detection].
Returns:
masks: a 3-D float32 tensor with shape [K, mask_height, mask_width].
"""
_, num_classes, height, width = instance_masks.get_shape().as_list()
k = tf.shape(instance_masks)[0]
instance_masks = tf.reshape(instance_masks, [-1, height, width])
classes = tf.to_int32(tf.reshape(classes, [-1]))
gather_idx = tf.range(k) * num_classes + classes
return tf.gather(instance_masks, gather_idx)
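# Illustrative sketch of the flattened gather above (hypothetical values):
# with num_classes=3, the reshaped masks are laid out as
# [det0_cls0, det0_cls1, det0_cls2, det1_cls0, ...], so detection i of class
# c lives at index i * num_classes + c. For classes = [[2, 0]] this gives
# gather_idx = [0*3 + 2, 1*3 + 0] = [2, 3].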
def _extract_rpn_feature_maps(self, preprocessed_inputs):
"""Extracts RPN features.
This function extracts two feature maps: a feature map to be directly
fed to a box predictor (to predict location and objectness scores for
proposals) and a feature map from which to crop regions which will then
be sent to the second stage box classifier.
Args:
preprocessed_inputs: a [batch, height, width, channels] image tensor.
Returns:
rpn_box_predictor_features: A 4-D float32 tensor with shape
[batch, height, width, depth] to be used for predicting proposal boxes
and corresponding objectness scores.
rpn_features_to_crop: A 4-D float32 tensor with shape
[batch, height, width, depth] representing image features to crop using
the proposals boxes.
anchors: A BoxList representing anchors (for the RPN) in
absolute coordinates.
image_shape: A 1-D tensor representing the input image shape.
"""
image_shape = tf.shape(preprocessed_inputs)
rpn_features_to_crop, _ = self._feature_extractor.extract_proposal_features(
preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)
feature_map_shape = tf.shape(rpn_features_to_crop)
anchors = box_list_ops.concatenate(
self._first_stage_anchor_generator.generate([(feature_map_shape[1],
feature_map_shape[2])]))
with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()):
kernel_size = self._first_stage_box_predictor_kernel_size
rpn_box_predictor_features = slim.conv2d(
rpn_features_to_crop,
self._first_stage_box_predictor_depth,
kernel_size=[kernel_size, kernel_size],
rate=self._first_stage_atrous_rate,
activation_fn=tf.nn.relu6)
return (rpn_box_predictor_features, rpn_features_to_crop,
anchors, image_shape)
def _predict_rpn_proposals(self, rpn_box_predictor_features):
"""Adds box predictors to RPN feature map to predict proposals.
Note resulting tensors will not have been postprocessed.
Args:
rpn_box_predictor_features: A 4-D float32 tensor with shape
[batch, height, width, depth] to be used for predicting proposal boxes
and corresponding objectness scores.
Returns:
box_encodings: 3-D float tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted boxes.
objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
Raises:
RuntimeError: if the anchor generator generates anchors corresponding to
multiple feature maps. We currently assume that a single feature map
is generated for the RPN.
"""
num_anchors_per_location = (
self._first_stage_anchor_generator.num_anchors_per_location())
if len(num_anchors_per_location) != 1:
raise RuntimeError('anchor_generator is expected to generate anchors '
'corresponding to a single feature map.')
if self._first_stage_box_predictor.is_keras_model:
box_predictions = self._first_stage_box_predictor(
[rpn_box_predictor_features])
else:
box_predictions = self._first_stage_box_predictor.predict(
[rpn_box_predictor_features],
num_anchors_per_location,
scope=self.first_stage_box_predictor_scope)
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (tf.squeeze(box_encodings, axis=2),
objectness_predictions_with_background)
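  # Editorial sketch (added for clarity; not part of the original file): the
  # squeeze above removes the singleton "boxes per location" axis, turning
  # [batch, num_anchors, 1, code_size] encodings into
  # [batch, num_anchors, code_size]. A minimal TF 1.x illustration with a
  # hypothetical helper name, assuming code_size = 4:
  def _example_squeeze_box_encodings(self):
    box_encodings = tf.zeros([1, 100, 1, 4])
    return tf.squeeze(box_encodings, axis=2)  # shape [1, 100, 4]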
def _remove_invalid_anchors_and_predictions(
self,
box_encodings,
objectness_predictions_with_background,
anchors_boxlist,
clip_window):
"""Removes anchors that (partially) fall outside an image.
Also removes associated box encodings and objectness predictions.
Args:
box_encodings: 3-D float tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted boxes.
objectness_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, 2] containing class
predictions (logits) for each of the anchors. Note that this
tensor *includes* background class predictions (at class index 0).
anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN)
in absolute coordinates.
clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax]
extent of the window to clip/prune to.
Returns:
box_encodings: 4-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes, where num_valid_anchors <= num_anchors
objectness_predictions_with_background: 2-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing class
predictions (logits) for each of the anchors, where
num_valid_anchors <= num_anchors. Note that this
tensor *includes* background class predictions (at class index 0).
anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in
absolute coordinates.
"""
pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(
anchors_boxlist, clip_window)
def _batch_gather_kept_indices(predictions_tensor):
return shape_utils.static_or_dynamic_map_fn(
partial(tf.gather, indices=keep_indices),
elems=predictions_tensor,
dtype=tf.float32,
parallel_iterations=self._parallel_iterations,
back_prop=True)
return (_batch_gather_kept_indices(box_encodings),
_batch_gather_kept_indices(objectness_predictions_with_background),
pruned_anchors_boxlist)
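  # Editorial sketch (added for clarity; not part of the original file): a
  # minimal example of the pruning above, assuming TF 1.x; the helper name is
  # hypothetical. The second anchor extends outside the [0, 0, 10, 10] window
  # and is dropped, together with its predictions, via tf.gather on
  # keep_indices.
  def _example_prune_outside_window(self):
    anchors = box_list.BoxList(
        tf.constant([[1., 1., 5., 5.], [-2., -2., 4., 4.]]))
    clip_window = tf.constant([0., 0., 10., 10.])
    pruned, keep_indices = box_list_ops.prune_outside_window(
        anchors, clip_window)
    predictions = tf.zeros([1, 2, 4])  # [batch, num_anchors, code_size]
    kept_predictions = tf.gather(predictions, keep_indices, axis=1)
    return pruned, kept_predictions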
def _flatten_first_two_dimensions(self, inputs):
"""Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.
Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape
[A * B, ..., depth].
Args:
inputs: A float tensor with shape [A, B, ..., depth]. Note that the first
two and last dimensions must be statically defined.
Returns:
      A float tensor with shape [A * B, ..., depth] (where the first and last
      dimensions are statically defined).
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)
flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +
combined_shape[2:])
return tf.reshape(inputs, flattened_shape)
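  # Editorial sketch (added for clarity; not part of the original file): the
  # reshape above merges the batch and proposal dimensions, e.g. a [2, 3, 5]
  # tensor becomes [6, 5]. Assumes TF 1.x; the helper name is hypothetical.
  def _example_flatten_first_two_dimensions(self):
    inputs = tf.zeros([2, 3, 5])
    return self._flatten_first_two_dimensions(inputs)  # shape [6, 5]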
def postprocess(self, prediction_dict, true_image_shapes):
"""Convert prediction tensors to final detections.
This function converts raw predictions tensors to final detection results.
See base class for output format conventions. Note also that by default,
scores are to be interpreted as logits, but if a score_converter is used,
then scores are remapped (and may thus have a different interpretation).
If number_of_stages=1, the returned results represent proposals from the
first stage RPN and are padded to have self.max_num_proposals for each
image; otherwise, the results can be interpreted as multiclass detections
from the full two-stage model and are padded to self._max_detections.
Args:
      prediction_dict: a dictionary holding prediction tensors (see the
        documentation for the predict method). If number_of_stages=1, we
expect prediction_dict to contain `rpn_box_encodings`,
`rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,
and `anchors` fields. Otherwise we expect prediction_dict to
additionally contain `refined_box_encodings`,
`class_predictions_with_background`, `num_proposals`,
`proposal_boxes` and, optionally, `mask_predictions` fields.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
detections: a dictionary containing the following fields
detection_boxes: [batch, max_detection, 4]
detection_scores: [batch, max_detections]
        detection_classes: [batch, max_detections]
          (this entry is only created when the model runs beyond the first
          stage, i.e. number_of_stages > 1)
        num_detections: [batch]
Raises:
ValueError: If `predict` is called before `preprocess`.
"""
with tf.name_scope('FirstStagePostprocessor'):
if self._number_of_stages == 1:
        # Michele's addition: true_image_shapes is passed for both the
        # image_shapes and true_image_shapes arguments of _postprocess_rpn.
proposal_boxes, proposal_scores, num_proposals = self._postprocess_rpn(
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'],
true_image_shapes,
true_image_shapes)
return {
fields.DetectionResultFields.detection_boxes: proposal_boxes,
fields.DetectionResultFields.detection_scores: proposal_scores,
fields.DetectionResultFields.num_detections:
tf.to_float(num_proposals),
}
# TODO(jrru): Remove mask_predictions from _post_process_box_classifier.
with tf.name_scope('SecondStagePostprocessor'):
if (self._number_of_stages == 2 or
(self._number_of_stages == 3 and self._is_training)):
mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS)
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
true_image_shapes,
mask_predictions=mask_predictions)
return detections_dict
if self._number_of_stages == 3:
# Post processing is already performed in 3rd stage. We need to transfer
# postprocessed tensors from `prediction_dict` to `detections_dict`.
detections_dict = {}
for key in prediction_dict:
if key == fields.DetectionResultFields.detection_masks:
detections_dict[key] = tf.sigmoid(prediction_dict[key])
elif 'detection' in key:
detections_dict[key] = prediction_dict[key]
return detections_dict
def _postprocess_rpn(self,
rpn_box_encodings_batch,
rpn_objectness_predictions_with_background_batch,
anchors,
image_shapes,
true_image_shapes):
"""Converts first stage prediction tensors from the RPN to proposals.
    This function decodes the raw RPN predictions and runs non-max suppression
    on the result.
Note that the behavior of this function is slightly modified during
training --- specifically, we stop the gradient from passing through the
proposal boxes and we only return a balanced sampled subset of proposals
with size `second_stage_batch_size`.
Args:
rpn_box_encodings_batch: A 3-D float32 tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted proposal box encodings.
rpn_objectness_predictions_with_background_batch: A 3-D float tensor of
shape [batch_size, num_anchors, 2] containing objectness predictions
(logits) for each of the anchors with 0 corresponding to background
and 1 corresponding to object.
anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
for the first stage RPN. Note that `num_anchors` can differ depending
on whether the model is created in training or inference mode.
image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of
images in the batch.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
proposal_boxes: A float tensor with shape
[batch_size, max_num_proposals, 4] representing the (potentially zero
padded) proposal boxes for all images in the batch. These boxes are
represented as normalized coordinates.
proposal_scores: A float tensor with shape
[batch_size, max_num_proposals] representing the (potentially zero
padded) proposal objectness scores for all images in the batch.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
"""
rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)
rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(
rpn_box_encodings_batch)
tiled_anchor_boxes = tf.tile(
tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])
proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch,
tiled_anchor_boxes)
proposal_boxes = tf.squeeze(proposal_boxes, axis=2)
rpn_objectness_softmax_without_background = tf.nn.softmax(
rpn_objectness_predictions_with_background_batch)[:, :, 1]
clip_window = self._compute_clip_window(image_shapes)
(proposal_boxes, proposal_scores, _, _, _,
num_proposals) = post_processing.batch_multiclass_non_max_suppression(
tf.expand_dims(proposal_boxes, axis=2),
tf.expand_dims(rpn_objectness_softmax_without_background,
axis=2),
self._first_stage_nms_score_threshold,
self._first_stage_nms_iou_threshold,
self._first_stage_max_proposals,
self._first_stage_max_proposals,
clip_window=clip_window)
if self._is_training:
proposal_boxes = tf.stop_gradient(proposal_boxes)
if not self._hard_example_miner:
(groundtruth_boxlists, groundtruth_classes_with_background_list, _,
groundtruth_weights_list
) = self._format_groundtruth_data(true_image_shapes)
(proposal_boxes, proposal_scores,
num_proposals) = self._sample_box_classifier_batch(
proposal_boxes, proposal_scores, num_proposals,
groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_weights_list)
# normalize proposal boxes
def normalize_boxes(args):
proposal_boxes_per_image = args[0]
image_shape = args[1]
normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
box_list.BoxList(proposal_boxes_per_image), image_shape[0],
image_shape[1], check_range=False).get()
return normalized_boxes_per_image
normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(
normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32)
return normalized_proposal_boxes, proposal_scores, num_proposals
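  # Editorial sketch (added for clarity; not part of the original file): the
  # final step above converts absolute proposal coordinates to normalized
  # [0, 1] coordinates per image. A minimal TF 1.x illustration with a
  # hypothetical helper name, assuming a 100 x 200 image:
  def _example_normalize_proposals(self):
    proposal_boxes = tf.constant([[10., 20., 50., 100.]])  # ymin, xmin, ymax, xmax
    return box_list_ops.to_normalized_coordinates(
        box_list.BoxList(proposal_boxes), 100, 200,
        check_range=False).get()  # [[0.1, 0.1, 0.5, 0.5]]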
def _sample_box_classifier_batch(
self,
proposal_boxes,
proposal_scores,
num_proposals,
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list,
debug=None):
"""Samples a minibatch for second stage.
Args:
proposal_boxes: A float tensor with shape
[batch_size, num_proposals, 4] representing the (potentially zero
padded) proposal boxes for all images in the batch. These boxes are
represented in absolute coordinates.
proposal_scores: A float tensor with shape
[batch_size, num_proposals] representing the (potentially zero
padded) proposal objectness scores for all images in the batch.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates
of the groundtruth boxes.
groundtruth_classes_with_background_list: A list of 2-D one-hot
(or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
class targets with the 0th index assumed to map to the background class.
      groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes]
        indicating the weight associated with the groundtruth boxes.
      debug: (optional) the true image shape, forwarded to the per-image
        minibatch sampler for debug printing.
Returns:
proposal_boxes: A float tensor with shape
[batch_size, second_stage_batch_size, 4] representing the (potentially
zero padded) proposal boxes for all images in the batch. These boxes
are represented in absolute coordinates.
proposal_scores: A float tensor with shape
[batch_size, second_stage_batch_size] representing the (potentially zero
padded) proposal objectness scores for all images in the batch.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
"""
single_image_proposal_box_sample = []
single_image_proposal_score_sample = []
single_image_num_proposals_sample = []
for (single_image_proposal_boxes,
single_image_proposal_scores,
single_image_num_proposals,
single_image_groundtruth_boxlist,
single_image_groundtruth_classes_with_background,
single_image_groundtruth_weights) in zip(
tf.unstack(proposal_boxes),
tf.unstack(proposal_scores),
tf.unstack(num_proposals),
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list):
single_image_boxlist = box_list.BoxList(single_image_proposal_boxes)
single_image_boxlist.add_field(fields.BoxListFields.scores,
single_image_proposal_scores)
sampled_boxlist = self._sample_box_classifier_minibatch_single_image(
single_image_boxlist,
single_image_num_proposals,
single_image_groundtruth_boxlist,
single_image_groundtruth_classes_with_background,
single_image_groundtruth_weights,
debug)
# sampled_boxlist.set(tf.Print(sampled_boxlist.get(), [sampled_boxlist.num_boxes()], message="sample size "))
sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list(
sampled_boxlist,
num_boxes=self._second_stage_batch_size)
single_image_num_proposals_sample.append(tf.minimum(
sampled_boxlist.num_boxes(),
self._second_stage_batch_size))
bb = sampled_padded_boxlist.get()
#bb = tf.Print(bb, [single_image_groundtruth_boxlist.num_boxes()], message=("After padding and num of GT" + str(bb.shape)))
single_image_proposal_box_sample.append(bb)
single_image_proposal_score_sample.append(
sampled_padded_boxlist.get_field(fields.BoxListFields.scores))
return (tf.stack(single_image_proposal_box_sample),
tf.stack(single_image_proposal_score_sample),
tf.stack(single_image_num_proposals_sample))
def _format_groundtruth_data(self, true_image_shapes, stage='detection'):
"""Helper function for preparing groundtruth data for target assignment.
In order to be consistent with the model.DetectionModel interface,
groundtruth boxes are specified in normalized coordinates and classes are
specified as label indices with no assumed background category. To prepare
for target assignment, we:
    1) convert boxes to absolute coordinates,
    2) add a background class at class index 0, and
    3) resize groundtruth instance masks, if available, to match image_shape.
Args:
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
      stage: a string identifying the calling stage; currently unused.
Returns:
groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates
of the groundtruth boxes.
groundtruth_classes_with_background_list: A list of 2-D one-hot
(or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
class targets with the 0th index assumed to map to the background class.
      groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of
        shape [num_boxes, image_height, image_width] containing instance masks.
        This is set to None if no masks exist in the provided groundtruth.
      groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
        [num_boxes] containing weights for the groundtruth boxes.
"""
groundtruth_boxlists = [
box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), true_image_shapes[i, 0],
true_image_shapes[i, 1])
for i, boxes in enumerate(
self.groundtruth_lists(fields.BoxListFields.boxes))
]
groundtruth_classes_with_background_list = [
tf.to_float(
tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'))
for one_hot_encoding in self.groundtruth_lists(
fields.BoxListFields.classes)]
groundtruth_masks_list = self._groundtruth_lists.get(
fields.BoxListFields.masks)
if groundtruth_masks_list is not None:
resized_masks_list = []
for mask in groundtruth_masks_list:
_, resized_mask, _ = self._image_resizer_fn(
# Reuse the given `image_resizer_fn` to resize groundtruth masks.
# `mask` tensor for an image is of the shape [num_masks,
# image_height, image_width]. Below we create a dummy image of the
# the shape [image_height, image_width, 1] to use with
# `image_resizer_fn`.
image=tf.zeros(tf.stack([tf.shape(mask)[1], tf.shape(mask)[2], 1])),
masks=mask)
resized_masks_list.append(resized_mask)
groundtruth_masks_list = resized_masks_list
if self.groundtruth_has_field(fields.BoxListFields.weights):
groundtruth_weights_list = self.groundtruth_lists(
fields.BoxListFields.weights)
else:
# Set weights for all batch elements equally to 1.0
groundtruth_weights_list = []
for groundtruth_classes in groundtruth_classes_with_background_list:
num_gt = tf.shape(groundtruth_classes)[0]
groundtruth_weights = tf.ones(num_gt)
groundtruth_weights_list.append(groundtruth_weights)
return (groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_masks_list, groundtruth_weights_list)
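  # Editorial sketch (added for clarity; not part of the original file): step 2
  # above prepends a background column to the one-hot class encodings via
  # tf.pad, so a [num_boxes, num_classes] tensor becomes
  # [num_boxes, num_classes + 1] with zeros at class index 0. Assumes TF 1.x;
  # the helper name is hypothetical.
  def _example_add_background_class(self):
    one_hot = tf.constant([[0., 1.], [1., 0.]])  # 2 boxes, 2 classes
    # -> [[0., 0., 1.], [0., 1., 0.]]
    return tf.pad(one_hot, [[0, 0], [1, 0]], mode='CONSTANT')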
def _sample_box_classifier_minibatch_single_image(
self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist,
groundtruth_classes_with_background, groundtruth_weights, debug=None):
"""Samples a mini-batch of proposals to be sent to the box classifier.
Helper function for self._postprocess_rpn.
Args:
proposal_boxlist: A BoxList containing K proposal boxes in absolute
coordinates.
num_valid_proposals: Number of valid proposals in the proposal boxlist.
groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in
absolute coordinates.
groundtruth_classes_with_background: A tensor with shape
`[N, self.num_classes + 1]` representing groundtruth classes. The
classes are assumed to be k-hot encoded, and include background as the
zero-th class.
groundtruth_weights: Weights attached to the groundtruth_boxes.
      debug: (optional) the true image shape, used only for debug printing.
Returns:
      a BoxList containing the sampled proposals.
"""
(cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
proposal_boxlist,
groundtruth_boxlist,
groundtruth_classes_with_background,
unmatched_class_label=tf.constant(
[1] + self._num_classes * [0], dtype=tf.float32),
groundtruth_weights=groundtruth_weights)
# Selects all boxes as candidates if none of them is selected according
# to cls_weights. This could happen as boxes within certain IOU ranges
# are ignored. If triggered, the selected boxes will still be ignored
# during loss computation.
positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
# Debug target mapping
#positive_indicator = tf.Print(positive_indicator, [positive_indicator, box_list_ops.to_normalized_coordinates(groundtruth_boxlist, debug[0], debug[1]).get()], summarize=999999)
valid_indicator = tf.logical_and(
tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals,
cls_weights > 0
)
sampled_indices = self._second_stage_sampler.subsample(
valid_indicator,
self._second_stage_batch_size,
positive_indicator)
return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)
def _compute_second_stage_input_feature_maps(self, features_to_crop,
proposal_boxes_normalized):
"""Crops to a set of proposals from the feature map for a batch of images.
Helper function for self._postprocess_rpn. This function calls
`tf.image.crop_and_resize` to create the feature map to be passed to the
second stage box classifier for each proposal.
Args:
features_to_crop: A float32 tensor with shape
[batch_size, height, width, depth]
proposal_boxes_normalized: A float32 tensor with shape [batch_size,
num_proposals, box_code_size] containing proposal boxes in
normalized coordinates.
Returns:
A float32 tensor with shape [K, new_height, new_width, depth].
"""
def get_box_inds(proposals):
proposals_shape = proposals.get_shape().as_list()
if any(dim is None for dim in proposals_shape):
proposals_shape = tf.shape(proposals)
ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
multiplier = tf.expand_dims(
tf.range(start=0, limit=proposals_shape[0]), 1)
return tf.reshape(ones_mat * multiplier, [-1])
if self._use_matmul_crop_and_resize:
def _single_image_crop_and_resize(inputs):
single_image_features_to_crop, proposal_boxes_normalized = inputs
return ops.matmul_crop_and_resize(
tf.expand_dims(single_image_features_to_crop, 0),
proposal_boxes_normalized,
[self._initial_crop_size, self._initial_crop_size])
cropped_regions = self._flatten_first_two_dimensions(
shape_utils.static_or_dynamic_map_fn(
_single_image_crop_and_resize,
elems=[features_to_crop, proposal_boxes_normalized],
dtype=tf.float32,
parallel_iterations=self._parallel_iterations))
else:
cropped_regions = tf.image.crop_and_resize(
features_to_crop,
self._flatten_first_two_dimensions(proposal_boxes_normalized),
get_box_inds(proposal_boxes_normalized),
(self._initial_crop_size, self._initial_crop_size))
return slim.max_pool2d(
cropped_regions,
[self._maxpool_kernel_size, self._maxpool_kernel_size], # Michele: Being specific to text, we want to preserve width more than height
stride=[self._maxpool_stride, 1])
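  # Editorial sketch (added for clarity; not part of the original file):
  # tf.image.crop_and_resize takes a flat box_ind vector mapping each proposal
  # to its batch element; get_box_inds above builds it as
  # [0, ..., 0, 1, ..., 1, ...]. A minimal TF 1.x illustration with a
  # hypothetical helper name, for batch_size = 2 and 3 proposals per image:
  def _example_get_box_inds(self):
    ones_mat = tf.ones([2, 3], dtype=tf.int32)
    multiplier = tf.expand_dims(tf.range(start=0, limit=2), 1)
    return tf.reshape(ones_mat * multiplier, [-1])  # [0, 0, 0, 1, 1, 1]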
def _postprocess_box_classifier(self,
refined_box_encodings,
class_predictions_with_background,
proposal_boxes,
num_proposals,
image_shapes,
mask_predictions=None):
"""Converts predictions from the second stage box classifier to detections.
Args:
refined_box_encodings: a 3-D float tensor with shape
[total_num_padded_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings. If using a shared
box across classes the shape will instead be
[total_num_padded_proposals, 1, 4]
      class_predictions_with_background: a 3-D float tensor with shape
[total_num_padded_proposals, num_classes + 1] containing class
predictions (logits) for each of the proposals. Note that this tensor
*includes* background class predictions (at class index 0).
proposal_boxes: a 3-D float tensor with shape
[batch_size, self.max_num_proposals, 4] representing decoded proposal
bounding boxes in absolute coordinates.
num_proposals: a 1-D int32 tensor of shape [batch] representing the number
of proposals predicted for each image in the batch.
image_shapes: a 2-D int32 tensor containing shapes of input image in the
batch.
mask_predictions: (optional) a 4-D float tensor with shape
[total_num_padded_proposals, num_classes, mask_height, mask_width]
containing instance mask prediction logits.
Returns:
A dictionary containing:
`detection_boxes`: [batch, max_detection, 4]
`detection_scores`: [batch, max_detections]
`detection_classes`: [batch, max_detections]
`num_detections`: [batch]
`detection_masks`:
(optional) [batch, max_detections, mask_height, mask_width]. Note
that a pixel-wise sigmoid score converter is applied to the detection
masks.
"""
refined_box_encodings_batch = tf.reshape(
refined_box_encodings,
[-1,
self.max_num_proposals,
refined_box_encodings.shape[1],
self._box_coder.code_size])
class_predictions_with_background_batch = tf.reshape(
class_predictions_with_background,
[-1, self.max_num_proposals, self.num_classes + 1]
)
refined_decoded_boxes_batch = self._batch_decode_boxes(
refined_box_encodings_batch, proposal_boxes)
class_predictions_with_background_batch = (
self._second_stage_score_conversion_fn(
class_predictions_with_background_batch))
class_predictions_batch = tf.reshape(
tf.slice(class_predictions_with_background_batch,
[0, 0, 1], [-1, -1, -1]),
[-1, self.max_num_proposals, self.num_classes])
clip_window = self._compute_clip_window(image_shapes)
mask_predictions_batch = None
if mask_predictions is not None:
mask_height = mask_predictions.shape[2].value
mask_width = mask_predictions.shape[3].value
mask_predictions = tf.sigmoid(mask_predictions)
mask_predictions_batch = tf.reshape(
mask_predictions, [-1, self.max_num_proposals,
self.num_classes, mask_height, mask_width])
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, _,
num_detections) = self._second_stage_nms_fn(
refined_decoded_boxes_batch,
class_predictions_batch,
clip_window=clip_window,
change_coordinate_frame=True,
num_valid_boxes=num_proposals,
masks=mask_predictions_batch)
detections = {
fields.DetectionResultFields.detection_boxes: nmsed_boxes,
fields.DetectionResultFields.detection_scores: nmsed_scores,
fields.DetectionResultFields.detection_classes: nmsed_classes,
fields.DetectionResultFields.num_detections: tf.to_float(num_detections)
}
if nmsed_masks is not None:
detections[fields.DetectionResultFields.detection_masks] = nmsed_masks
return detections
def _batch_decode_boxes(self, box_encodings, anchor_boxes):
"""Decodes box encodings with respect to the anchor boxes.
Args:
box_encodings: a 4-D tensor with shape
[batch_size, num_anchors, num_classes, self._box_coder.code_size]
representing box encodings.
anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size]
representing decoded bounding boxes. If using a shared box across
classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
Returns:
decoded_boxes: a
[batch_size, num_anchors, num_classes, self._box_coder.code_size]
float tensor representing bounding box predictions (for each image in
batch, proposal and class). If using a shared box across classes the
shape will instead be
[batch_size, num_anchors, 1, self._box_coder.code_size].
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
num_classes = combined_shape[2]
tiled_anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])
tiled_anchors_boxlist = box_list.BoxList(
tf.reshape(tiled_anchor_boxes, [-1, 4]))
decoded_boxes = self._box_coder.decode(
tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
tiled_anchors_boxlist)
return tf.reshape(decoded_boxes.get(),
tf.stack([combined_shape[0], combined_shape[1],
num_classes, 4]))
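  # Editorial sketch (added for clarity; not part of the original file): the
  # decode above tiles every anchor once per class so each class-specific
  # encoding has a matching anchor row; two anchors and three classes yield
  # six anchor rows. Assumes TF 1.x; the helper name is hypothetical.
  def _example_tile_anchors_per_class(self):
    anchor_boxes = tf.zeros([1, 2, 4])  # [batch, num_anchors, 4]
    tiled = tf.tile(tf.expand_dims(anchor_boxes, 2), [1, 1, 3, 1])
    return tf.reshape(tiled, [-1, 4])  # shape [6, 4]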
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Compute scalar loss tensors given prediction tensors.
    In this overridden-RPN variant, the first stage RPN losses are skipped
    (proposals are supplied externally), so only the second stage box
    classifier losses are computed.
    Args:
      prediction_dict: a dictionary holding prediction tensors (see the
        documentation for the predict method). We expect prediction_dict to
        contain `refined_box_encodings`,
        `class_predictions_with_background`, `num_proposals`,
        `proposal_boxes`, `image_shape` and, optionally, `mask_predictions`
        fields.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
scope: Optional scope name.
Returns:
      a dictionary mapping loss keys ('second_stage_localization_loss',
      'second_stage_classification_loss') to scalar tensors representing
      corresponding loss values.
"""
    with tf.name_scope(scope, 'Loss', prediction_dict.values()):
      (groundtruth_boxlists, groundtruth_classes_with_background_list,
       groundtruth_masks_list, groundtruth_weights_list
      ) = self._format_groundtruth_data(true_image_shapes)
      # The RPN is overridden by externally supplied proposals, so the RPN
      # losses (_loss_rpn) are skipped and only the second stage box
      # classifier loss is computed.
      loss_dict = self._loss_box_classifier(
          prediction_dict['refined_box_encodings'],
          prediction_dict['class_predictions_with_background'],
          prediction_dict['proposal_boxes'],
          prediction_dict['num_proposals'],
          groundtruth_boxlists,
          groundtruth_classes_with_background_list,
          groundtruth_weights_list,
          prediction_dict['image_shape'],
          prediction_dict.get('mask_predictions'),
          groundtruth_masks_list)
      return loss_dict
def _loss_rpn(self, rpn_box_encodings,
rpn_objectness_predictions_with_background, anchors,
groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_weights_list):
"""Computes scalar RPN loss tensors.
Uses self._proposal_target_assigner to obtain regression and classification
targets for the first stage RPN, samples a "minibatch" of anchors to
participate in the loss computation, and returns the RPN losses.
Args:
      rpn_box_encodings: A 3-D float tensor of shape
        [batch_size, num_anchors, self._box_coder.code_size] containing
        predicted proposal box encodings.
      rpn_objectness_predictions_with_background: A 3-D float tensor of shape
        [batch_size, num_anchors, 2] containing objectness predictions
        (logits) for each of the anchors with 0 corresponding to background
        and 1 corresponding to object.
anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
for the first stage RPN. Note that `num_anchors` can differ depending
on whether the model is created in training or inference mode.
groundtruth_boxlists: A list of BoxLists containing coordinates of the
groundtruth boxes.
groundtruth_classes_with_background_list: A list of 2-D one-hot
(or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
class targets with the 0th index assumed to map to the background class.
groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
a dictionary mapping loss keys (`first_stage_localization_loss`,
`first_stage_objectness_loss`) to scalar tensors representing
corresponding loss values.
"""
with tf.name_scope('RPNLoss'):
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, _) = target_assigner.batch_assign_targets(
target_assigner=self._proposal_target_assigner,
anchors_batch=box_list.BoxList(anchors),
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=(len(groundtruth_boxlists) * [None]),
gt_weights_batch=groundtruth_weights_list)
batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2)
def _minibatch_subsample_fn(inputs):
cls_targets, cls_weights = inputs
return self._first_stage_sampler.subsample(
tf.cast(cls_weights, tf.bool),
self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool))
batch_sampled_indices = tf.to_float(shape_utils.static_or_dynamic_map_fn(
_minibatch_subsample_fn,
[batch_cls_targets, batch_cls_weights],
dtype=tf.bool,
parallel_iterations=self._parallel_iterations,
back_prop=True))
# Normalize by number of examples in sampled minibatch
normalizer = tf.reduce_sum(batch_sampled_indices, axis=1)
batch_one_hot_targets = tf.one_hot(
tf.to_int32(batch_cls_targets), depth=2)
sampled_reg_indices = tf.multiply(batch_sampled_indices,
batch_reg_weights)
localization_losses = self._first_stage_localization_loss(
rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices)
objectness_losses = self._first_stage_objectness_loss(
rpn_objectness_predictions_with_background,
batch_one_hot_targets, weights=batch_sampled_indices)
localization_loss = tf.reduce_mean(
tf.reduce_sum(localization_losses, axis=1) / normalizer)
objectness_loss = tf.reduce_mean(
tf.reduce_sum(objectness_losses, axis=1) / normalizer)
localization_loss = tf.multiply(self._first_stage_loc_loss_weight,
localization_loss,
name='localization_loss')
objectness_loss = tf.multiply(self._first_stage_obj_loss_weight,
objectness_loss, name='objectness_loss')
loss_dict = {localization_loss.op.name: localization_loss,
objectness_loss.op.name: objectness_loss}
return loss_dict
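  # Editorial sketch (added for clarity; not part of the original file): the
  # RPN losses above are summed per image over the sampled anchors and
  # divided by the per-image sample count, then averaged over the batch.
  # Assumes TF 1.x; the helper name and values are hypothetical.
  def _example_normalize_rpn_loss(self):
    per_anchor_losses = tf.constant([[1., 2., 3.], [4., 0., 0.]])
    sampled_indices = tf.constant([[1., 1., 0.], [1., 0., 0.]])
    normalizer = tf.reduce_sum(sampled_indices, axis=1)  # [2., 1.]
    per_image = tf.reduce_sum(per_anchor_losses * sampled_indices, axis=1)
    return tf.reduce_mean(per_image / normalizer)  # (3/2 + 4/1) / 2 = 2.75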
def _loss_box_classifier(self,
refined_box_encodings,
class_predictions_with_background,
proposal_boxes,
num_proposals,
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list,
image_shape,
prediction_masks=None,
groundtruth_masks_list=None):
"""Computes scalar box classifier loss tensors.
Uses self._detector_target_assigner to obtain regression and classification
targets for the second stage box classifier, optionally performs
hard mining, and returns losses. All losses are computed independently
for each image and then averaged across the batch.
Please note that for boxes and masks with multiple labels, the box
regression and mask prediction losses are only computed for one label.
This function assumes that the proposal boxes in the "padded" regions are
actually zero (and thus should not be matched to).
Args:
refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, box_coder.code_size] representing
predicted (final) refined box encodings. If using a shared box across
classes this will instead have shape
[total_num_proposals, 1, box_coder.code_size].
class_predictions_with_background: a 2-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors. Note that this tensor
*includes* background class predictions (at class index 0).
proposal_boxes: [batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
groundtruth_boxlists: a list of BoxLists containing coordinates of the
groundtruth boxes.
groundtruth_classes_with_background_list: a list of 2-D one-hot
(or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the
class targets with the 0th index assumed to map to the background class.
groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
image_shape: a 1-D tensor of shape [4] representing the image shape.
prediction_masks: an optional 4-D tensor with shape [total_num_proposals,
num_classes, mask_height, mask_width] containing the instance masks for
each box.
groundtruth_masks_list: an optional list of 3-D tensors of shape
[num_boxes, image_height, image_width] containing the instance masks for
each of the boxes.
Returns:
a dictionary mapping loss keys ('second_stage_localization_loss',
'second_stage_classification_loss') to scalar tensors representing
corresponding loss values.
Raises:
ValueError: if `predict_instance_masks` in
second_stage_mask_rcnn_box_predictor is True and
`groundtruth_masks_list` is not provided.
"""
with tf.name_scope('BoxClassifierLoss'):
paddings_indicator = self._padded_batched_proposals_indicator(
num_proposals, self.max_num_proposals)
proposal_boxlists = [
box_list.BoxList(proposal_boxes_single_image)
for proposal_boxes_single_image in tf.unstack(proposal_boxes)]
batch_size = len(proposal_boxlists)
num_proposals_or_one = tf.to_float(tf.expand_dims(
tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1))
normalizer = tf.tile(num_proposals_or_one,
[1, self.max_num_proposals]) * batch_size
(batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets,
batch_reg_weights, _) = target_assigner.batch_assign_targets(
target_assigner=self._detector_target_assigner,
anchors_batch=proposal_boxlists,
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=groundtruth_classes_with_background_list,
unmatched_class_label=tf.constant(
[1] + self._num_classes * [0], dtype=tf.float32),
gt_weights_batch=groundtruth_weights_list)
class_predictions_with_background = tf.reshape(
class_predictions_with_background,
[batch_size, self.max_num_proposals, -1])
flat_cls_targets_with_background = tf.reshape(
batch_cls_targets_with_background,
[batch_size * self.max_num_proposals, -1])
one_hot_flat_cls_targets_with_background = tf.argmax(
flat_cls_targets_with_background, axis=1)
one_hot_flat_cls_targets_with_background = tf.one_hot(
one_hot_flat_cls_targets_with_background,
flat_cls_targets_with_background.get_shape()[1])
      # If using a shared box across classes, use the encodings directly.
if refined_box_encodings.shape[1] == 1:
reshaped_refined_box_encodings = tf.reshape(
refined_box_encodings,
[batch_size, self.max_num_proposals, self._box_coder.code_size])
# For anchors with multiple labels, picks refined_location_encodings
# for just one class to avoid over-counting for regression loss and
# (optionally) mask loss.
else:
# We only predict refined location encodings for the non background
# classes, but we now pad it to make it compatible with the class
# predictions
refined_box_encodings_with_background = tf.pad(
refined_box_encodings, [[0, 0], [1, 0], [0, 0]])
refined_box_encodings_masked_by_class_targets = tf.boolean_mask(
refined_box_encodings_with_background,
tf.greater(one_hot_flat_cls_targets_with_background, 0))
reshaped_refined_box_encodings = tf.reshape(
refined_box_encodings_masked_by_class_targets,
[batch_size, self.max_num_proposals, self._box_coder.code_size])
second_stage_loc_losses = self._second_stage_localization_loss(
reshaped_refined_box_encodings,
batch_reg_targets, weights=batch_reg_weights) / normalizer
second_stage_cls_losses = ops.reduce_sum_trailing_dimensions(
self._second_stage_classification_loss(
class_predictions_with_background,
batch_cls_targets_with_background,
weights=batch_cls_weights),
ndims=2) / normalizer
second_stage_loc_loss = tf.reduce_sum(
tf.boolean_mask(second_stage_loc_losses, paddings_indicator))
second_stage_cls_loss = tf.reduce_sum(
tf.boolean_mask(second_stage_cls_losses, paddings_indicator))
if self._hard_example_miner:
(second_stage_loc_loss, second_stage_cls_loss
) = self._unpad_proposals_and_apply_hard_mining(
proposal_boxlists, second_stage_loc_losses,
second_stage_cls_losses, num_proposals)
localization_loss = tf.multiply(self._second_stage_loc_loss_weight,
second_stage_loc_loss,
name='localization_loss')
classification_loss = tf.multiply(self._second_stage_cls_loss_weight,
second_stage_cls_loss,
name='classification_loss')
loss_dict = {localization_loss.op.name: localization_loss,
classification_loss.op.name: classification_loss}
second_stage_mask_loss = None
if prediction_masks is not None:
if groundtruth_masks_list is None:
raise ValueError('Groundtruth instance masks not provided. '
'Please configure input reader.')
unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32)
(batch_mask_targets, _, _, batch_mask_target_weights,
_) = target_assigner.batch_assign_targets(
target_assigner=self._detector_target_assigner,
anchors_batch=proposal_boxlists,
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=groundtruth_masks_list,
unmatched_class_label=unmatched_mask_label,
gt_weights_batch=groundtruth_weights_list)
        # Pad the prediction_masks to add zeros for the background class, to
        # be consistent with the class predictions.
if prediction_masks.get_shape().as_list()[1] == 1:
# Class agnostic masks or masks for one-class prediction. Logic for
# both cases is the same since background predictions are ignored
# through the batch_mask_target_weights.
prediction_masks_masked_by_class_targets = prediction_masks
else:
prediction_masks_with_background = tf.pad(
prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]])
prediction_masks_masked_by_class_targets = tf.boolean_mask(
prediction_masks_with_background,
tf.greater(one_hot_flat_cls_targets_with_background, 0))
mask_height = prediction_masks.shape[2].value
mask_width = prediction_masks.shape[3].value
reshaped_prediction_masks = tf.reshape(
prediction_masks_masked_by_class_targets,
[batch_size, -1, mask_height * mask_width])
batch_mask_targets_shape = tf.shape(batch_mask_targets)
flat_gt_masks = tf.reshape(batch_mask_targets,
[-1, batch_mask_targets_shape[2],
batch_mask_targets_shape[3]])
# Use normalized proposals to crop mask targets from image masks.
flat_normalized_proposals = box_list_ops.to_normalized_coordinates(
box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])),
image_shape[1], image_shape[2]).get()
flat_cropped_gt_mask = tf.image.crop_and_resize(
tf.expand_dims(flat_gt_masks, -1),
flat_normalized_proposals,
tf.range(flat_normalized_proposals.shape[0].value),
[mask_height, mask_width])
batch_cropped_gt_mask = tf.reshape(
flat_cropped_gt_mask,
[batch_size, -1, mask_height * mask_width])
second_stage_mask_losses = ops.reduce_sum_trailing_dimensions(
self._second_stage_mask_loss(
reshaped_prediction_masks,
batch_cropped_gt_mask,
weights=batch_mask_target_weights),
ndims=2) / (
mask_height * mask_width * tf.maximum(
tf.reduce_sum(
batch_mask_target_weights, axis=1, keep_dims=True
), tf.ones((batch_size, 1))))
second_stage_mask_loss = tf.reduce_sum(
tf.boolean_mask(second_stage_mask_losses, paddings_indicator))
if second_stage_mask_loss is not None:
mask_loss = tf.multiply(self._second_stage_mask_loss_weight,
second_stage_mask_loss, name='mask_loss')
loss_dict[mask_loss.op.name] = mask_loss
return loss_dict
def _padded_batched_proposals_indicator(self,
num_proposals,
max_num_proposals):
"""Creates indicator matrix of non-pad elements of padded batch proposals.
Args:
num_proposals: Tensor of type tf.int32 with shape [batch_size].
max_num_proposals: Maximum number of proposals per image (integer).
Returns:
A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
"""
batch_size = tf.size(num_proposals)
tiled_num_proposals = tf.tile(
tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
tiled_proposal_index = tf.tile(
tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
return tf.greater(tiled_num_proposals, tiled_proposal_index)
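  # Editorial sketch (added for clarity; not part of the original file): the
  # indicator above is a tiled elementwise comparison. With
  # num_proposals = [2, 3] and max_num_proposals = 4 it yields
  # [[True, True, False, False], [True, True, True, False]]. Assumes TF 1.x;
  # the helper name is hypothetical.
  def _example_paddings_indicator(self):
    num_proposals = tf.constant([2, 3])
    return self._padded_batched_proposals_indicator(num_proposals, 4)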
def _unpad_proposals_and_apply_hard_mining(self,
proposal_boxlists,
second_stage_loc_losses,
second_stage_cls_losses,
num_proposals):
"""Unpads proposals and applies hard mining.
Args:
proposal_boxlists: A list of `batch_size` BoxLists each representing
`self.max_num_proposals` representing decoded proposal bounding boxes
for each image.
second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape
`[batch_size, self.max_num_proposals]` representing per-anchor
second stage localization loss values.
second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape
`[batch_size, self.max_num_proposals]` representing per-anchor
second stage classification loss values.
num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
representing the number of proposals predicted for each image in
the batch.
Returns:
second_stage_loc_loss: A scalar float32 tensor representing the second
stage localization loss.
second_stage_cls_loss: A scalar float32 tensor representing the second
stage classification loss.
"""
for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss,
single_image_num_proposals) in zip(
proposal_boxlists,
tf.unstack(second_stage_loc_losses),
tf.unstack(second_stage_cls_losses),
tf.unstack(num_proposals)):
proposal_boxlist = box_list.BoxList(
tf.slice(proposal_boxlist.get(),
[0, 0], [single_image_num_proposals, -1]))
single_image_loc_loss = tf.slice(single_image_loc_loss,
[0], [single_image_num_proposals])
single_image_cls_loss = tf.slice(single_image_cls_loss,
[0], [single_image_num_proposals])
      # Note: returning inside the loop applies hard mining to the first
      # image only, which effectively assumes a batch size of 1.
      return self._hard_example_miner(
          location_losses=tf.expand_dims(single_image_loc_loss, 0),
          cls_losses=tf.expand_dims(single_image_cls_loss, 0),
          decoded_boxlist_list=[proposal_boxlist])
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
load_all_detection_checkpoint_vars: whether to load all variables (when
`fine_tune_checkpoint_type` is `detection`). If False, only variables
within the feature extractor scopes are included. Default False.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
Raises:
ValueError: if fine_tune_checkpoint_type is neither `classification`
nor `detection`.
"""
if fine_tune_checkpoint_type not in ['detection', 'classification']:
raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
if fine_tune_checkpoint_type == 'classification':
return self._feature_extractor.restore_from_classification_checkpoint_fn(
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope)
variables_to_restore = tf.global_variables()
variables_to_restore.append(slim.get_or_create_global_step())
# Only load feature extractor variables to be consistent with loading from
# a classification checkpoint.
include_patterns = None
if not load_all_detection_checkpoint_vars:
include_patterns = [
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope
]
feature_extractor_variables = tf.contrib.framework.filter_variables(
variables_to_restore, include_patterns=include_patterns)
return {var.op.name: var for var in feature_extractor_variables}
from abc import abstractmethod
from functools import partial
import tensorflow as tf
import json
import numpy as np
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.builders import box_predictor_builder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import box_predictor
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import post_processing
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.utils import ops
from object_detection.utils import shape_utils
import os
import sys
sys.path.append("/notebooks/text-renderer/")
import data_util
slim = tf.contrib.slim
# Abstract base class for Faster R-CNN feature extractors.
class FasterRCNNFeatureExtractor(object):
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
self._is_training = is_training
self._first_stage_features_stride = first_stage_features_stride
self._train_batch_norm = (batch_norm_trainable and is_training)
self._reuse_weights = reuse_weights
self._weight_decay = weight_decay
@abstractmethod
def preprocess(self, resized_inputs):
pass
def extract_proposal_features(self, preprocessed_inputs, scope):
with tf.variable_scope(scope, values=[preprocessed_inputs]):
return self._extract_proposal_features(preprocessed_inputs, scope)
@abstractmethod
def _extract_proposal_features(self, preprocessed_inputs, scope):
pass
def extract_box_classifier_features(self, proposal_feature_maps, scope):
with tf.variable_scope(
scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE):
return self._extract_box_classifier_features(proposal_feature_maps, scope)
@abstractmethod
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
pass
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
variables_to_restore = {}
for variable in tf.global_variables():
for scope_name in [first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope]:
if variable.op.name.startswith(scope_name):
var_name = variable.op.name.replace(scope_name + '/', '')
variables_to_restore[var_name] = variable
return variables_to_restore
# Faster R-CNN meta-architecture whose first stage (RPN) proposals are
# overridden by externally supplied boxes loaded from
# first_stage_proposals_path.
class FasterRCNNMetaArchOverrideRPN(model.DetectionModel):
def __init__(self,
is_training,
num_classes,
image_resizer_fn,
feature_extractor,
number_of_stages,
first_stage_anchor_generator,
first_stage_target_assigner,
first_stage_atrous_rate,
first_stage_box_predictor_arg_scope_fn,
first_stage_box_predictor_kernel_size,
first_stage_box_predictor_depth,
first_stage_minibatch_size,
first_stage_sampler,
first_stage_nms_score_threshold,
first_stage_nms_iou_threshold,
first_stage_max_proposals,
first_stage_proposals_path,
first_stage_localization_loss_weight,
first_stage_objectness_loss_weight,
initial_crop_size,
maxpool_kernel_size,
maxpool_stride,
second_stage_target_assigner,
second_stage_mask_rcnn_box_predictor,
second_stage_batch_size,
second_stage_sampler,
second_stage_non_max_suppression_fn,
second_stage_score_conversion_fn,
second_stage_localization_loss_weight,
second_stage_classification_loss_weight,
second_stage_classification_loss,
second_stage_mask_prediction_loss_weight=1.0,
hard_example_miner=None,
parallel_iterations=16,
add_summaries=True,
use_matmul_crop_and_resize=False,
clip_anchors_to_image=False):
print("Running FasterRCNN with overriden RPN")
super(FasterRCNNMetaArchOverrideRPN, self).__init__(num_classes=num_classes)
if (number_of_stages==1):
raise ValueError('Number of stages = 1 is not allowed for overriden RPN proposals')
if is_training and second_stage_batch_size > first_stage_max_proposals:
raise ValueError('second_stage_batch_size should be no greater than '
'first_stage_max_proposals.')
if not isinstance(first_stage_anchor_generator,
grid_anchor_generator.GridAnchorGenerator):
raise ValueError('first_stage_anchor_generator must be of type '
'grid_anchor_generator.GridAnchorGenerator.')
    # Load the externally supplied proposals from the XML annotations in
    # first_stage_proposals_path; these override the RPN output.
    first_stage_proposals_path = os.path.join(first_stage_proposals_path, '')
    xml_root = data_util.read_xml_batch(first_stage_proposals_path)[0]['annot']
    _, self.proposals = data_util.xml_to_numpy(None, xml_root)
    print("Shape of overriding proposals", self.proposals.shape)
self._is_training = is_training
self._image_resizer_fn = image_resizer_fn
self._feature_extractor = feature_extractor
self._number_of_stages = number_of_stages
self._proposal_target_assigner = first_stage_target_assigner
self._detector_target_assigner = second_stage_target_assigner
self._box_coder = self._proposal_target_assigner.box_coder
self._first_stage_anchor_generator = first_stage_anchor_generator
self._first_stage_atrous_rate = first_stage_atrous_rate
self._first_stage_box_predictor_arg_scope_fn = (
first_stage_box_predictor_arg_scope_fn)
self._first_stage_box_predictor_kernel_size = (
first_stage_box_predictor_kernel_size)
self._first_stage_box_predictor_depth = first_stage_box_predictor_depth
self._first_stage_minibatch_size = first_stage_minibatch_size
self._first_stage_sampler = first_stage_sampler
self._first_stage_box_predictor = (
box_predictor_builder.build_convolutional_box_predictor(
is_training=self._is_training,
num_classes=1,
conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn,
use_dropout=False,
dropout_keep_prob=1.0,
box_code_size=self._box_coder.code_size,
kernel_size=1,
num_layers_before_predictor=0,
min_depth=0,
max_depth=0))
self._first_stage_nms_score_threshold = first_stage_nms_score_threshold
self._first_stage_nms_iou_threshold = first_stage_nms_iou_threshold
self._first_stage_max_proposals = first_stage_max_proposals
self._first_stage_localization_loss = (
losses.WeightedSmoothL1LocalizationLoss())
self._first_stage_objectness_loss = (
losses.WeightedSoftmaxClassificationLoss())
self._first_stage_loc_loss_weight = first_stage_localization_loss_weight
self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight
self._initial_crop_size = initial_crop_size
self._maxpool_kernel_size = maxpool_kernel_size
self._maxpool_stride = maxpool_stride
self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor
self._second_stage_batch_size = second_stage_batch_size
self._second_stage_sampler = second_stage_sampler
self._second_stage_nms_fn = second_stage_non_max_suppression_fn
self._second_stage_score_conversion_fn = second_stage_score_conversion_fn
self._second_stage_localization_loss = (
losses.WeightedSmoothL1LocalizationLoss())
self._second_stage_classification_loss = second_stage_classification_loss
self._second_stage_mask_loss = (
losses.WeightedSigmoidClassificationLoss())
self._second_stage_loc_loss_weight = second_stage_localization_loss_weight
self._second_stage_cls_loss_weight = second_stage_classification_loss_weight
self._second_stage_mask_loss_weight = (
second_stage_mask_prediction_loss_weight)
self._use_matmul_crop_and_resize = use_matmul_crop_and_resize
self._hard_example_miner = hard_example_miner
self._parallel_iterations = parallel_iterations
self.clip_anchors_to_image = clip_anchors_to_image
if self._number_of_stages <= 0 or self._number_of_stages > 3:
raise ValueError('Number of stages should be a value in {1, 2, 3}.')
@property
def first_stage_feature_extractor_scope(self):
return 'FirstStageFeatureExtractor'
@property
def second_stage_feature_extractor_scope(self):
return 'SecondStageFeatureExtractor'
@property
def first_stage_box_predictor_scope(self):
return 'FirstStageBoxPredictor'
@property
def second_stage_box_predictor_scope(self):
return 'SecondStageBoxPredictor'
@property
def max_num_proposals(self):
if self._is_training and not self._hard_example_miner:
return self._second_stage_batch_size
return self.proposals.shape[1]
@property
def anchors(self):
if not self._anchors:
raise RuntimeError('anchors have not been constructed yet!')
if not isinstance(self._anchors, box_list.BoxList):
raise RuntimeError('anchors should be a BoxList object, but is not.')
return self._anchors
def preprocess(self, inputs):
if inputs.dtype is not tf.float32:
raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
outputs = shape_utils.static_or_dynamic_map_fn(
self._image_resizer_fn,
elems=inputs,
dtype=[tf.float32, tf.int32],
parallel_iterations=self._parallel_iterations)
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def _compute_clip_window(self, image_shapes):
clip_heights = image_shapes[:, 0]
clip_widths = image_shapes[:, 1]
clip_window = tf.to_float(tf.stack([tf.zeros_like(clip_heights),
tf.zeros_like(clip_heights),
clip_heights, clip_widths], axis=1))
return clip_window
def predict(self, preprocessed_inputs, true_image_shapes):
print("Predict running")
image_shape = tf.shape(preprocessed_inputs)
rpn_features_to_crop, _ = self._feature_extractor.extract_proposal_features(
preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)
prediction_dict = {
'rpn_features_to_crop': rpn_features_to_crop,
'image_shape': image_shape,
}
if self._number_of_stages >= 2:
prediction_dict.update(self._predict_second_stage(
rpn_features_to_crop, image_shape, true_image_shapes))
if self._number_of_stages == 3:
prediction_dict = self._predict_third_stage(
prediction_dict, true_image_shapes)
return prediction_dict
def _image_batch_shape_2d(self, image_batch_shape_1d):
return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0),
[image_batch_shape_1d[0], 1])
def _predict_second_stage(self, rpn_features_to_crop,
image_shape,
true_image_shapes):
image_shape_2d = self._image_batch_shape_2d(image_shape)
def normalize_boxes(args):
proposal_boxes_per_image = args[0]
image_shape = args[1]
normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
box_list.BoxList(proposal_boxes_per_image), image_shape[0],
image_shape[1], check_range=False).get()
return normalized_boxes_per_image
def to_absolute_boxes(args):
proposal_boxes_per_image = args[0]
image_shape = args[1]
normalized_boxes_per_image = box_list_ops.to_absolute_coordinates(
box_list.BoxList(proposal_boxes_per_image), image_shape[0],
image_shape[1], check_range=False).get()
return normalized_boxes_per_image
    # Use the externally supplied proposals instead of RPN output; they are
    # stored normalized and converted here to absolute coordinates.
    proposal_boxes = tf.constant(self.proposals, dtype='float32')
    proposal_boxes = shape_utils.static_or_dynamic_map_fn(
        to_absolute_boxes, elems=[proposal_boxes, true_image_shapes],
        dtype=tf.float32)
    num_proposals = tf.constant([proposal_boxes.shape[1]], dtype='int32')
if self._is_training:
proposal_boxes = tf.stop_gradient(proposal_boxes)
if not self._hard_example_miner:
placeholder_scores = tf.zeros((1, proposal_boxes.shape[1], 2))
(groundtruth_boxlists, groundtruth_classes_with_background_list, _,
groundtruth_weights_list
) = self._format_groundtruth_data(true_image_shapes)
        (proposal_boxes, _, num_proposals) = self._sample_box_classifier_batch(
            proposal_boxes, placeholder_scores, num_proposals,
            groundtruth_boxlists, groundtruth_classes_with_background_list,
            groundtruth_weights_list, true_image_shapes[0])
proposal_boxes_normalized = shape_utils.static_or_dynamic_map_fn(
normalize_boxes, elems=[proposal_boxes, true_image_shapes], dtype=tf.float32)
flattened_proposal_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, proposal_boxes_normalized))
box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
flattened_proposal_feature_maps,
scope=self.second_stage_feature_extractor_scope))
if self._mask_rcnn_box_predictor.is_keras_model:
box_predictions = self._mask_rcnn_box_predictor(
[box_classifier_features],
prediction_stage=2)
else:
box_predictions = self._mask_rcnn_box_predictor.predict(
[box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=2)
refined_box_encodings = tf.squeeze(
box_predictions[box_predictor.BOX_ENCODINGS],
axis=1, name='all_refined_box_encodings')
class_predictions_with_background = tf.squeeze(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1, name='all_class_predictions_with_background')
absolute_proposal_boxes = ops.normalized_to_image_coordinates(
proposal_boxes_normalized, image_shape, self._parallel_iterations)
prediction_dict = {
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'num_proposals': num_proposals,
'proposal_boxes': absolute_proposal_boxes,
'box_classifier_features': box_classifier_features,
'proposal_boxes_normalized': proposal_boxes_normalized,
}
return prediction_dict
def _predict_third_stage(self, prediction_dict, image_shapes):
if self._is_training:
curr_box_classifier_features = prediction_dict['box_classifier_features']
detection_classes = prediction_dict['class_predictions_with_background']
if self._mask_rcnn_box_predictor.is_keras_model:
mask_predictions = self._mask_rcnn_box_predictor(
[curr_box_classifier_features],
prediction_stage=3)
else:
mask_predictions = self._mask_rcnn_box_predictor.predict(
[curr_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=3)
prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
else:
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
image_shapes)
prediction_dict.update(detections_dict)
detection_boxes = detections_dict[
fields.DetectionResultFields.detection_boxes]
detection_classes = detections_dict[
fields.DetectionResultFields.detection_classes]
rpn_features_to_crop = prediction_dict['rpn_features_to_crop']
batch_size = tf.shape(detection_boxes)[0]
max_detection = tf.shape(detection_boxes)[1]
flattened_detected_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, detection_boxes))
curr_box_classifier_features = (
self._feature_extractor.extract_box_classifier_features(
flattened_detected_feature_maps,
scope=self.second_stage_feature_extractor_scope))
if self._mask_rcnn_box_predictor.is_keras_model:
mask_predictions = self._mask_rcnn_box_predictor(
[curr_box_classifier_features],
prediction_stage=3)
else:
mask_predictions = self._mask_rcnn_box_predictor.predict(
[curr_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=3)
detection_masks = tf.squeeze(mask_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
_, num_classes, mask_height, mask_width = (
detection_masks.get_shape().as_list())
_, max_detection = detection_classes.get_shape().as_list()
if num_classes > 1:
detection_masks = self._gather_instance_masks(
detection_masks, detection_classes)
prediction_dict[fields.DetectionResultFields.detection_masks] = (
tf.reshape(detection_masks,
[batch_size, max_detection, mask_height, mask_width]))
return prediction_dict
def _gather_instance_masks(self, instance_masks, classes):
_, num_classes, height, width = instance_masks.get_shape().as_list()
k = tf.shape(instance_masks)[0]
instance_masks = tf.reshape(instance_masks, [-1, height, width])
classes = tf.to_int32(tf.reshape(classes, [-1]))
gather_idx = tf.range(k) * num_classes + classes
return tf.gather(instance_masks, gather_idx)
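  # Worked example (editor's sketch): with num_classes=3 and classes=[2, 0],
  # gather_idx = [0 * 3 + 2, 1 * 3 + 0] = [2, 3], selecting the class-2 mask
  # of instance 0 and the class-0 mask of instance 1.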
def _extract_rpn_feature_maps(self, preprocessed_inputs):
image_shape = tf.shape(preprocessed_inputs)
rpn_features_to_crop, _ = self._feature_extractor.extract_proposal_features(
preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)
feature_map_shape = tf.shape(rpn_features_to_crop)
anchors = box_list_ops.concatenate(
self._first_stage_anchor_generator.generate([(feature_map_shape[1],
feature_map_shape[2])]))
with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()):
kernel_size = self._first_stage_box_predictor_kernel_size
rpn_box_predictor_features = slim.conv2d(
rpn_features_to_crop,
self._first_stage_box_predictor_depth,
kernel_size=[kernel_size, kernel_size],
rate=self._first_stage_atrous_rate,
activation_fn=tf.nn.relu6)
return (rpn_box_predictor_features, rpn_features_to_crop,
anchors, image_shape)
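  # Editor's note (sketch): the RPN head is a single kernel_size x
  # kernel_size conv over the shared feature map; anchors are generated for
  # that one feature-map grid and concatenated into a single BoxList.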
def _predict_rpn_proposals(self, rpn_box_predictor_features):
num_anchors_per_location = (
self._first_stage_anchor_generator.num_anchors_per_location())
if len(num_anchors_per_location) != 1:
raise RuntimeError('anchor_generator is expected to generate anchors '
'corresponding to a single feature map.')
if self._first_stage_box_predictor.is_keras_model:
box_predictions = self._first_stage_box_predictor(
[rpn_box_predictor_features])
else:
box_predictions = self._first_stage_box_predictor.predict(
[rpn_box_predictor_features],
num_anchors_per_location,
scope=self.first_stage_box_predictor_scope)
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
objectness_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (tf.squeeze(box_encodings, axis=2),
objectness_predictions_with_background)
def _remove_invalid_anchors_and_predictions(
self,
box_encodings,
objectness_predictions_with_background,
anchors_boxlist,
clip_window):
pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(
anchors_boxlist, clip_window)
def _batch_gather_kept_indices(predictions_tensor):
return shape_utils.static_or_dynamic_map_fn(
partial(tf.gather, indices=keep_indices),
elems=predictions_tensor,
dtype=tf.float32,
parallel_iterations=self._parallel_iterations,
back_prop=True)
return (_batch_gather_kept_indices(box_encodings),
_batch_gather_kept_indices(objectness_predictions_with_background),
pruned_anchors_boxlist)
def _flatten_first_two_dimensions(self, inputs):
combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)
flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +
combined_shape[2:])
return tf.reshape(inputs, flattened_shape)
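  # Worked example (editor's sketch): an input of shape
  # [batch, num_proposals, height, width, depth] becomes
  # [batch * num_proposals, height, width, depth], letting per-proposal crops
  # run through the feature extractor as one large batch.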
def postprocess(self, prediction_dict, true_image_shapes):
with tf.name_scope('FirstStagePostprocessor'):
if self._number_of_stages == 1:
proposal_boxes, proposal_scores, num_proposals = self._postprocess_rpn(
prediction_dict['rpn_box_encodings'],
prediction_dict['rpn_objectness_predictions_with_background'],
prediction_dict['anchors'],
true_image_shapes,
true_image_shapes)
return {
fields.DetectionResultFields.detection_boxes: proposal_boxes,
fields.DetectionResultFields.detection_scores: proposal_scores,
fields.DetectionResultFields.num_detections:
tf.to_float(num_proposals),
}
# TODO(jrru): Remove mask_predictions from _post_process_box_classifier.
with tf.name_scope('SecondStagePostprocessor'):
if (self._number_of_stages == 2 or
(self._number_of_stages == 3 and self._is_training)):
mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS)
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
true_image_shapes,
mask_predictions=mask_predictions)
return detections_dict
if self._number_of_stages == 3:
      # Post-processing is already performed in the 3rd stage. We only need to
      # transfer the post-processed tensors from `prediction_dict` to
      # `detections_dict`.
detections_dict = {}
for key in prediction_dict:
if key == fields.DetectionResultFields.detection_masks:
detections_dict[key] = tf.sigmoid(prediction_dict[key])
elif 'detection' in key:
detections_dict[key] = prediction_dict[key]
return detections_dict
def _postprocess_rpn(self,
rpn_box_encodings_batch,
rpn_objectness_predictions_with_background_batch,
anchors,
image_shapes,
true_image_shapes):
rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)
rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(
rpn_box_encodings_batch)
tiled_anchor_boxes = tf.tile(
tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])
proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch,
tiled_anchor_boxes)
proposal_boxes = tf.squeeze(proposal_boxes, axis=2)
rpn_objectness_softmax_without_background = tf.nn.softmax(
rpn_objectness_predictions_with_background_batch)[:, :, 1]
clip_window = self._compute_clip_window(image_shapes)
(proposal_boxes, proposal_scores, _, _, _,
num_proposals) = post_processing.batch_multiclass_non_max_suppression(
tf.expand_dims(proposal_boxes, axis=2),
tf.expand_dims(rpn_objectness_softmax_without_background,
axis=2),
self._first_stage_nms_score_threshold,
self._first_stage_nms_iou_threshold,
self._first_stage_max_proposals,
self._first_stage_max_proposals,
clip_window=clip_window)
if self._is_training:
proposal_boxes = tf.stop_gradient(proposal_boxes)
if not self._hard_example_miner:
(groundtruth_boxlists, groundtruth_classes_with_background_list, _,
groundtruth_weights_list
) = self._format_groundtruth_data(true_image_shapes)
(proposal_boxes, proposal_scores,
num_proposals) = self._sample_box_classifier_batch(
proposal_boxes, proposal_scores, num_proposals,
groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_weights_list)
# normalize proposal boxes
def normalize_boxes(args):
proposal_boxes_per_image = args[0]
image_shape = args[1]
normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
box_list.BoxList(proposal_boxes_per_image), image_shape[0],
image_shape[1], check_range=False).get()
return normalized_boxes_per_image
normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(
normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32)
return normalized_proposal_boxes, proposal_scores, num_proposals
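  # Editor's note (sketch): the returned proposals are normalized
  # [ymin, xmin, ymax, xmax] boxes padded to first_stage_max_proposals per
  # image; num_proposals records how many entries are valid before padding.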
def _sample_box_classifier_batch(
self,
proposal_boxes,
proposal_scores,
num_proposals,
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list,
debug=None):
single_image_proposal_box_sample = []
single_image_proposal_score_sample = []
single_image_num_proposals_sample = []
for (single_image_proposal_boxes,
single_image_proposal_scores,
single_image_num_proposals,
single_image_groundtruth_boxlist,
single_image_groundtruth_classes_with_background,
single_image_groundtruth_weights) in zip(
tf.unstack(proposal_boxes),
tf.unstack(proposal_scores),
tf.unstack(num_proposals),
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list):
single_image_boxlist = box_list.BoxList(single_image_proposal_boxes)
single_image_boxlist.add_field(fields.BoxListFields.scores,
single_image_proposal_scores)
sampled_boxlist = self._sample_box_classifier_minibatch_single_image(
single_image_boxlist,
single_image_num_proposals,
single_image_groundtruth_boxlist,
single_image_groundtruth_classes_with_background,
single_image_groundtruth_weights,
debug)
sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list(
sampled_boxlist,
num_boxes=self._second_stage_batch_size)
single_image_num_proposals_sample.append(tf.minimum(
sampled_boxlist.num_boxes(),
self._second_stage_batch_size))
bb = sampled_padded_boxlist.get()
single_image_proposal_box_sample.append(bb)
single_image_proposal_score_sample.append(
sampled_padded_boxlist.get_field(fields.BoxListFields.scores))
return (tf.stack(single_image_proposal_box_sample),
tf.stack(single_image_proposal_score_sample),
tf.stack(single_image_num_proposals_sample))
def _format_groundtruth_data(self, true_image_shapes, stage='detection'):
groundtruth_boxlists = [
box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), true_image_shapes[i, 0],
true_image_shapes[i, 1])
for i, boxes in enumerate(
self.groundtruth_lists(fields.BoxListFields.boxes))
]
groundtruth_classes_with_background_list = [
tf.to_float(
tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'))
for one_hot_encoding in self.groundtruth_lists(
fields.BoxListFields.classes)]
groundtruth_masks_list = self._groundtruth_lists.get(
fields.BoxListFields.masks)
if groundtruth_masks_list is not None:
resized_masks_list = []
for mask in groundtruth_masks_list:
_, resized_mask, _ = self._image_resizer_fn(
# Reuse the given `image_resizer_fn` to resize groundtruth masks.
# `mask` tensor for an image is of the shape [num_masks,
# image_height, image_width]. Below we create a dummy image of the
            # shape [image_height, image_width, 1] to use with
# `image_resizer_fn`.
image=tf.zeros(tf.stack([tf.shape(mask)[1], tf.shape(mask)[2], 1])),
masks=mask)
resized_masks_list.append(resized_mask)
groundtruth_masks_list = resized_masks_list
if self.groundtruth_has_field(fields.BoxListFields.weights):
groundtruth_weights_list = self.groundtruth_lists(
fields.BoxListFields.weights)
else:
# Set weights for all batch elements equally to 1.0
groundtruth_weights_list = []
for groundtruth_classes in groundtruth_classes_with_background_list:
num_gt = tf.shape(groundtruth_classes)[0]
groundtruth_weights = tf.ones(num_gt)
groundtruth_weights_list.append(groundtruth_weights)
return (groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_masks_list, groundtruth_weights_list)
def _sample_box_classifier_minibatch_single_image(
self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist,
groundtruth_classes_with_background, groundtruth_weights, debug=None):
(cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
proposal_boxlist,
groundtruth_boxlist,
groundtruth_classes_with_background,
unmatched_class_label=tf.constant(
[1] + self._num_classes * [0], dtype=tf.float32),
groundtruth_weights=groundtruth_weights)
# Selects all boxes as candidates if none of them is selected according
# to cls_weights. This could happen as boxes within certain IOU ranges
# are ignored. If triggered, the selected boxes will still be ignored
# during loss computation.
positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
valid_indicator = tf.logical_and(
tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals,
cls_weights > 0
)
sampled_indices = self._second_stage_sampler.subsample(
valid_indicator,
self._second_stage_batch_size,
positive_indicator)
return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)
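  # Editor's note (sketch): the second-stage sampler draws a class-balanced
  # minibatch of second_stage_batch_size proposals; e.g. with a positive
  # fraction of 0.25 and batch size 64, at most 16 positives are kept and the
  # remainder is filled with negatives.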
def _compute_second_stage_input_feature_maps(self, features_to_crop,
proposal_boxes_normalized):
def get_box_inds(proposals):
proposals_shape = proposals.get_shape().as_list()
if any(dim is None for dim in proposals_shape):
proposals_shape = tf.shape(proposals)
ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
multiplier = tf.expand_dims(
tf.range(start=0, limit=proposals_shape[0]), 1)
return tf.reshape(ones_mat * multiplier, [-1])
if self._use_matmul_crop_and_resize:
def _single_image_crop_and_resize(inputs):
single_image_features_to_crop, proposal_boxes_normalized = inputs
return ops.matmul_crop_and_resize(
tf.expand_dims(single_image_features_to_crop, 0),
proposal_boxes_normalized,
[self._initial_crop_size, self._initial_crop_size])
cropped_regions = self._flatten_first_two_dimensions(
shape_utils.static_or_dynamic_map_fn(
_single_image_crop_and_resize,
elems=[features_to_crop, proposal_boxes_normalized],
dtype=tf.float32,
parallel_iterations=self._parallel_iterations))
else:
cropped_regions = tf.image.crop_and_resize(
features_to_crop,
self._flatten_first_two_dimensions(proposal_boxes_normalized),
get_box_inds(proposal_boxes_normalized),
(self._initial_crop_size, self._initial_crop_size))
    return slim.max_pool2d(
        cropped_regions,
        [self._maxpool_kernel_size, self._maxpool_kernel_size],
        # Width stride is fixed to 1: for text recognition we want to preserve
        # horizontal resolution more than vertical.
        stride=[self._maxpool_stride, 1])
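  # Worked example (editor's sketch): get_box_inds for 2 images with 3
  # proposals each returns [0, 0, 0, 1, 1, 1], mapping every flattened
  # proposal back to the image it is cropped from in tf.image.crop_and_resize.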
def _postprocess_box_classifier(self,
refined_box_encodings,
class_predictions_with_background,
proposal_boxes,
num_proposals,
image_shapes,
mask_predictions=None):
refined_box_encodings_batch = tf.reshape(
refined_box_encodings,
[-1,
self.max_num_proposals,
refined_box_encodings.shape[1],
self._box_coder.code_size])
class_predictions_with_background_batch = tf.reshape(
class_predictions_with_background,
[-1, self.max_num_proposals, self.num_classes + 1]
)
refined_decoded_boxes_batch = self._batch_decode_boxes(
refined_box_encodings_batch, proposal_boxes)
class_predictions_with_background_batch = (
self._second_stage_score_conversion_fn(
class_predictions_with_background_batch))
class_predictions_batch = tf.reshape(
tf.slice(class_predictions_with_background_batch,
[0, 0, 1], [-1, -1, -1]),
[-1, self.max_num_proposals, self.num_classes])
clip_window = self._compute_clip_window(image_shapes)
mask_predictions_batch = None
if mask_predictions is not None:
mask_height = mask_predictions.shape[2].value
mask_width = mask_predictions.shape[3].value
mask_predictions = tf.sigmoid(mask_predictions)
mask_predictions_batch = tf.reshape(
mask_predictions, [-1, self.max_num_proposals,
self.num_classes, mask_height, mask_width])
(nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, _,
num_detections) = self._second_stage_nms_fn(
refined_decoded_boxes_batch,
class_predictions_batch,
clip_window=clip_window,
change_coordinate_frame=True,
num_valid_boxes=num_proposals,
masks=mask_predictions_batch)
detections = {
fields.DetectionResultFields.detection_boxes: nmsed_boxes,
fields.DetectionResultFields.detection_scores: nmsed_scores,
fields.DetectionResultFields.detection_classes: nmsed_classes,
fields.DetectionResultFields.num_detections: tf.to_float(num_detections)
}
if nmsed_masks is not None:
detections[fields.DetectionResultFields.detection_masks] = nmsed_masks
return detections
def _batch_decode_boxes(self, box_encodings, anchor_boxes):
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
num_classes = combined_shape[2]
tiled_anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])
tiled_anchors_boxlist = box_list.BoxList(
tf.reshape(tiled_anchor_boxes, [-1, 4]))
decoded_boxes = self._box_coder.decode(
tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
tiled_anchors_boxlist)
return tf.reshape(decoded_boxes.get(),
tf.stack([combined_shape[0], combined_shape[1],
num_classes, 4]))
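  # Worked example (editor's sketch): encodings of shape
  # [batch, num_boxes, num_classes, code_size] are decoded against anchors
  # tiled to the same shape, so each class hypothesis gets its own decoded
  # [ymin, xmin, ymax, xmax] box.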
def loss(self, prediction_dict, true_image_shapes, scope=None):
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
(groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_masks_list, groundtruth_weights_list
) = self._format_groundtruth_data(true_image_shapes)
      # Upstream also computed RPN losses here behind a number_of_stages
      # check; this variant only computes the box classifier losses.
      loss_dict = self._loss_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list,
prediction_dict['image_shape'],
prediction_dict.get('mask_predictions'),
groundtruth_masks_list,
      )
return loss_dict
def _loss_rpn(self, rpn_box_encodings,
rpn_objectness_predictions_with_background, anchors,
groundtruth_boxlists, groundtruth_classes_with_background_list,
groundtruth_weights_list):
with tf.name_scope('RPNLoss'):
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, _) = target_assigner.batch_assign_targets(
target_assigner=self._proposal_target_assigner,
anchors_batch=box_list.BoxList(anchors),
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=(len(groundtruth_boxlists) * [None]),
gt_weights_batch=groundtruth_weights_list)
batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2)
def _minibatch_subsample_fn(inputs):
cls_targets, cls_weights = inputs
return self._first_stage_sampler.subsample(
tf.cast(cls_weights, tf.bool),
self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool))
batch_sampled_indices = tf.to_float(shape_utils.static_or_dynamic_map_fn(
_minibatch_subsample_fn,
[batch_cls_targets, batch_cls_weights],
dtype=tf.bool,
parallel_iterations=self._parallel_iterations,
back_prop=True))
# Normalize by number of examples in sampled minibatch
normalizer = tf.reduce_sum(batch_sampled_indices, axis=1)
batch_one_hot_targets = tf.one_hot(
tf.to_int32(batch_cls_targets), depth=2)
sampled_reg_indices = tf.multiply(batch_sampled_indices,
batch_reg_weights)
localization_losses = self._first_stage_localization_loss(
rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices)
objectness_losses = self._first_stage_objectness_loss(
rpn_objectness_predictions_with_background,
batch_one_hot_targets, weights=batch_sampled_indices)
localization_loss = tf.reduce_mean(
tf.reduce_sum(localization_losses, axis=1) / normalizer)
objectness_loss = tf.reduce_mean(
tf.reduce_sum(objectness_losses, axis=1) / normalizer)
localization_loss = tf.multiply(self._first_stage_loc_loss_weight,
localization_loss,
name='localization_loss')
objectness_loss = tf.multiply(self._first_stage_obj_loss_weight,
objectness_loss, name='objectness_loss')
loss_dict = {localization_loss.op.name: localization_loss,
objectness_loss.op.name: objectness_loss}
return loss_dict
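  # Editor's note (sketch): both RPN losses are normalized per image by the
  # number of anchors actually sampled into that image's minibatch, not by
  # the total anchor count.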
def _loss_box_classifier(self,
refined_box_encodings,
class_predictions_with_background,
proposal_boxes,
num_proposals,
groundtruth_boxlists,
groundtruth_classes_with_background_list,
groundtruth_weights_list,
image_shape,
prediction_masks=None,
groundtruth_masks_list=None):
with tf.name_scope('BoxClassifierLoss'):
paddings_indicator = self._padded_batched_proposals_indicator(
num_proposals, self.max_num_proposals)
proposal_boxlists = [
box_list.BoxList(proposal_boxes_single_image)
for proposal_boxes_single_image in tf.unstack(proposal_boxes)]
batch_size = len(proposal_boxlists)
num_proposals_or_one = tf.to_float(tf.expand_dims(
tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1))
normalizer = tf.tile(num_proposals_or_one,
[1, self.max_num_proposals]) * batch_size
(batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets,
batch_reg_weights, _) = target_assigner.batch_assign_targets(
target_assigner=self._detector_target_assigner,
anchors_batch=proposal_boxlists,
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=groundtruth_classes_with_background_list,
unmatched_class_label=tf.constant(
[1] + self._num_classes * [0], dtype=tf.float32),
gt_weights_batch=groundtruth_weights_list)
class_predictions_with_background = tf.reshape(
class_predictions_with_background,
[batch_size, self.max_num_proposals, -1])
flat_cls_targets_with_background = tf.reshape(
batch_cls_targets_with_background,
[batch_size * self.max_num_proposals, -1])
one_hot_flat_cls_targets_with_background = tf.argmax(
flat_cls_targets_with_background, axis=1)
one_hot_flat_cls_targets_with_background = tf.one_hot(
one_hot_flat_cls_targets_with_background,
flat_cls_targets_with_background.get_shape()[1])
      # If a single box encoding is shared across all classes, use it directly.
if refined_box_encodings.shape[1] == 1:
reshaped_refined_box_encodings = tf.reshape(
refined_box_encodings,
[batch_size, self.max_num_proposals, self._box_coder.code_size])
# For anchors with multiple labels, picks refined_location_encodings
# for just one class to avoid over-counting for regression loss and
# (optionally) mask loss.
else:
# We only predict refined location encodings for the non background
# classes, but we now pad it to make it compatible with the class
# predictions
refined_box_encodings_with_background = tf.pad(
refined_box_encodings, [[0, 0], [1, 0], [0, 0]])
refined_box_encodings_masked_by_class_targets = tf.boolean_mask(
refined_box_encodings_with_background,
tf.greater(one_hot_flat_cls_targets_with_background, 0))
reshaped_refined_box_encodings = tf.reshape(
refined_box_encodings_masked_by_class_targets,
[batch_size, self.max_num_proposals, self._box_coder.code_size])
second_stage_loc_losses = self._second_stage_localization_loss(
reshaped_refined_box_encodings,
batch_reg_targets, weights=batch_reg_weights) / normalizer
second_stage_cls_losses = ops.reduce_sum_trailing_dimensions(
self._second_stage_classification_loss(
class_predictions_with_background,
batch_cls_targets_with_background,
weights=batch_cls_weights),
ndims=2) / normalizer
second_stage_loc_loss = tf.reduce_sum(
tf.boolean_mask(second_stage_loc_losses, paddings_indicator))
second_stage_cls_loss = tf.reduce_sum(
tf.boolean_mask(second_stage_cls_losses, paddings_indicator))
if self._hard_example_miner:
(second_stage_loc_loss, second_stage_cls_loss
) = self._unpad_proposals_and_apply_hard_mining(
proposal_boxlists, second_stage_loc_losses,
second_stage_cls_losses, num_proposals)
localization_loss = tf.multiply(self._second_stage_loc_loss_weight,
second_stage_loc_loss,
name='localization_loss')
classification_loss = tf.multiply(self._second_stage_cls_loss_weight,
second_stage_cls_loss,
name='classification_loss')
loss_dict = {localization_loss.op.name: localization_loss,
classification_loss.op.name: classification_loss}
second_stage_mask_loss = None
if prediction_masks is not None:
if groundtruth_masks_list is None:
raise ValueError('Groundtruth instance masks not provided. '
'Please configure input reader.')
unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32)
(batch_mask_targets, _, _, batch_mask_target_weights,
_) = target_assigner.batch_assign_targets(
target_assigner=self._detector_target_assigner,
anchors_batch=proposal_boxlists,
gt_box_batch=groundtruth_boxlists,
gt_class_targets_batch=groundtruth_masks_list,
unmatched_class_label=unmatched_mask_label,
gt_weights_batch=groundtruth_weights_list)
        # Pad the prediction_masks with zeros for the background class so they
        # are consistent with the class predictions.
if prediction_masks.get_shape().as_list()[1] == 1:
# Class agnostic masks or masks for one-class prediction. Logic for
# both cases is the same since background predictions are ignored
# through the batch_mask_target_weights.
prediction_masks_masked_by_class_targets = prediction_masks
else:
prediction_masks_with_background = tf.pad(
prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]])
prediction_masks_masked_by_class_targets = tf.boolean_mask(
prediction_masks_with_background,
tf.greater(one_hot_flat_cls_targets_with_background, 0))
mask_height = prediction_masks.shape[2].value
mask_width = prediction_masks.shape[3].value
reshaped_prediction_masks = tf.reshape(
prediction_masks_masked_by_class_targets,
[batch_size, -1, mask_height * mask_width])
batch_mask_targets_shape = tf.shape(batch_mask_targets)
flat_gt_masks = tf.reshape(batch_mask_targets,
[-1, batch_mask_targets_shape[2],
batch_mask_targets_shape[3]])
# Use normalized proposals to crop mask targets from image masks.
flat_normalized_proposals = box_list_ops.to_normalized_coordinates(
box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])),
image_shape[1], image_shape[2]).get()
flat_cropped_gt_mask = tf.image.crop_and_resize(
tf.expand_dims(flat_gt_masks, -1),
flat_normalized_proposals,
tf.range(flat_normalized_proposals.shape[0].value),
[mask_height, mask_width])
batch_cropped_gt_mask = tf.reshape(
flat_cropped_gt_mask,
[batch_size, -1, mask_height * mask_width])
second_stage_mask_losses = ops.reduce_sum_trailing_dimensions(
self._second_stage_mask_loss(
reshaped_prediction_masks,
batch_cropped_gt_mask,
weights=batch_mask_target_weights),
ndims=2) / (
mask_height * mask_width * tf.maximum(
tf.reduce_sum(
batch_mask_target_weights, axis=1, keep_dims=True
), tf.ones((batch_size, 1))))
second_stage_mask_loss = tf.reduce_sum(
tf.boolean_mask(second_stage_mask_losses, paddings_indicator))
if second_stage_mask_loss is not None:
mask_loss = tf.multiply(self._second_stage_mask_loss_weight,
second_stage_mask_loss, name='mask_loss')
loss_dict[mask_loss.op.name] = mask_loss
return loss_dict
def _padded_batched_proposals_indicator(self,
num_proposals,
max_num_proposals):
batch_size = tf.size(num_proposals)
tiled_num_proposals = tf.tile(
tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
tiled_proposal_index = tf.tile(
tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
return tf.greater(tiled_num_proposals, tiled_proposal_index)
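  # Worked example (editor's sketch): num_proposals=[1, 2] with
  # max_num_proposals=3 yields [[True, False, False], [True, True, False]],
  # masking out padded proposal slots when summing the losses.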
def _unpad_proposals_and_apply_hard_mining(self,
proposal_boxlists,
second_stage_loc_losses,
second_stage_cls_losses,
num_proposals):
for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss,
single_image_num_proposals) in zip(
proposal_boxlists,
tf.unstack(second_stage_loc_losses),
tf.unstack(second_stage_cls_losses),
tf.unstack(num_proposals)):
proposal_boxlist = box_list.BoxList(
tf.slice(proposal_boxlist.get(),
[0, 0], [single_image_num_proposals, -1]))
single_image_loc_loss = tf.slice(single_image_loc_loss,
[0], [single_image_num_proposals])
single_image_cls_loss = tf.slice(single_image_cls_loss,
[0], [single_image_num_proposals])
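      # Editor's note: the miner handles a single image, so this returns on
      # the first loop iteration -- mirroring the upstream batch-size-1
      # assumption for hard example mining.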
return self._hard_example_miner(
location_losses=tf.expand_dims(single_image_loc_loss, 0),
cls_losses=tf.expand_dims(single_image_cls_loss, 0),
decoded_boxlist_list=[proposal_boxlist])
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
if fine_tune_checkpoint_type not in ['detection', 'classification']:
raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
if fine_tune_checkpoint_type == 'classification':
return self._feature_extractor.restore_from_classification_checkpoint_fn(
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope)
variables_to_restore = tf.global_variables()
variables_to_restore.append(slim.get_or_create_global_step())
# Only load feature extractor variables to be consistent with loading from
# a classification checkpoint.
include_patterns = None
if not load_all_detection_checkpoint_vars:
include_patterns = [
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope
]
feature_extractor_variables = tf.contrib.framework.filter_variables(
variables_to_restore, include_patterns=include_patterns)
return {var.op.name: var for var in feature_extractor_variables}
| true
| true
|
f7015aaa826888850cd8fe4abef03d3587111370
| 643
|
py
|
Python
|
manage.py
|
lievertom/2020.2-Projeto-Kokama-Traducao
|
c9c164ec69a3eba58fcbe0a74a43601346e57755
|
[
"MIT"
] | 1
|
2021-03-13T02:44:27.000Z
|
2021-03-13T02:44:27.000Z
|
manage.py
|
luisgaboardi/2020.2-Projeto-Kokama-Traducao
|
c14edf0611f6dcc7dc6de3249018de9c5cf71604
|
[
"MIT"
] | 21
|
2021-03-14T01:51:11.000Z
|
2021-05-25T02:04:23.000Z
|
manage.py
|
lievertom/2020.2-Projeto-Kokama-Traducao
|
c9c164ec69a3eba58fcbe0a74a43601346e57755
|
[
"MIT"
] | 3
|
2021-04-02T11:06:33.000Z
|
2021-05-12T21:30:34.000Z
|
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'translate.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.227273
| 73
| 0.679627
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'translate.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
f7015abd02d2e91af592e552a6b9c0a139f233d9
| 9,314
|
py
|
Python
|
monero_glue_test/test_crypto.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | 20
|
2018-04-05T22:06:10.000Z
|
2021-09-18T10:43:44.000Z
|
monero_glue_test/test_crypto.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | null | null | null |
monero_glue_test/test_crypto.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | 5
|
2018-08-06T15:06:04.000Z
|
2021-07-16T01:58:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Dusan Klinec, ph4r05, 2018
import binascii
from binascii import unhexlify
import unittest
import aiounittest
from monero_glue.xmr import common, crypto
from monero_glue.xmr.core import ec_py
class CryptoTest(aiounittest.AsyncTestCase):
"""Simple tests"""
def __init__(self, *args, **kwargs):
super(CryptoTest, self).__init__(*args, **kwargs)
def test_ed_crypto(self):
sqr = ec_py.fe_expmod(ec_py.py_fe_sqrtm1, 2)
self.assertEqual(sqr, ec_py.fe_mod(-1))
self.assertEqual(
ec_py.py_fe_A, ec_py.fe_mod(2 * (1 - ec_py.d) * ec_py.inv(1 + ec_py.py_d))
)
self.assertEqual(
ec_py.fe_expmod(ec_py.py_fe_fffb1, 2),
ec_py.fe_mod(-2 * ec_py.py_fe_A * (ec_py.py_fe_A + 2)),
)
self.assertEqual(
ec_py.fe_expmod(ec_py.py_fe_fffb2, 2),
ec_py.fe_mod(2 * ec_py.py_fe_A * (ec_py.py_fe_A + 2)),
)
self.assertEqual(
ec_py.fe_expmod(ec_py.py_fe_fffb3, 2),
ec_py.fe_mod(-ec_py.py_fe_sqrtm1 * ec_py.py_fe_A * (ec_py.py_fe_A + 2)),
)
self.assertEqual(
ec_py.fe_expmod(ec_py.py_fe_fffb4, 2),
ec_py.fe_mod(ec_py.py_fe_sqrtm1 * ec_py.py_fe_A * (ec_py.py_fe_A + 2)),
)
def test_encoding(self):
point = unhexlify(
b"2486224797d05cae3cba4be043be2db0df381f3f19cfa113f86ab38e3d8d2bd0"
)
self.assertEqual(point, crypto.encodepoint(crypto.decodepoint(point)))
self.assertTrue(
crypto.point_eq(
crypto.decodepoint(point),
crypto.decodepoint(crypto.encodepoint(crypto.decodepoint(point))),
)
)
def test_scalarmult_base(self):
scalar = crypto.decodeint(
unhexlify(
b"a0eea49140a3b036da30eacf64bd9d56ce3ef68ba82ef13571ec511edbcf8303"
)
)
exp = unhexlify(
b"16bb4a3c44e2ced511fc0d4cd86b13b3af21efc99fb0356199fac489f2544c09"
)
res = crypto.scalarmult_base(scalar)
self.assertEqual(exp, crypto.encodepoint(res))
self.assertTrue(crypto.point_eq(crypto.decodepoint(exp), res))
scalar = crypto.decodeint(
unhexlify(
b"fd290dce39f781aebbdbd24584ed6d48bd300de19d9c3decfda0a6e2c6751d0f"
)
)
exp = unhexlify(
b"123daf90fc26f13c6529e6b49bfed498995ac383ef19c0db6771143f24ba8dd5"
)
res = crypto.scalarmult_base(scalar)
self.assertEqual(exp, crypto.encodepoint(res))
self.assertTrue(crypto.point_eq(crypto.decodepoint(exp), res))
def test_scalarmult(self):
priv = unhexlify(
b"3482fb9735ef879fcae5ec7721b5d3646e155c4fb58d6cc11c732c9c9b76620a"
)
pub = unhexlify(
b"2486224797d05cae3cba4be043be2db0df381f3f19cfa113f86ab38e3d8d2bd0"
)
exp = unhexlify(
b"adcd1f5881f46f254900a03c654e71950a88a0236fa0a3a946c9b8daed6ef43d"
)
res = crypto.scalarmult(crypto.decodepoint(pub), crypto.decodeint(priv))
self.assertEqual(exp, crypto.encodepoint(res))
self.assertTrue(crypto.point_eq(crypto.decodepoint(exp), res))
def test_cn_fast_hash(self):
inp = unhexlify(
b"259ef2aba8feb473cf39058a0fe30b9ff6d245b42b6826687ebd6b63128aff6405"
)
res = crypto.cn_fast_hash(inp)
self.assertEqual(
res,
unhexlify(
b"86db87b83fb1246efca5f3b0db09ce3fa4d605b0d10e6507cac253dd31a3ec16"
),
)
def test_hash_to_scalar(self):
inp = unhexlify(
b"259ef2aba8feb473cf39058a0fe30b9ff6d245b42b6826687ebd6b63128aff6405"
)
res = crypto.hash_to_scalar(inp)
exp = crypto.decodeint(binascii.unhexlify(
b"9907925b254e12162609fc0dfd0fef2aa4d605b0d10e6507cac253dd31a3ec06"))
self.assertTrue(crypto.sc_eq(res, exp))
def test_hash_to_point(self):
data = unhexlify(
b"42f6835bf83114a1f5f6076fe79bdfa0bd67c74b88f127d54572d3910dd09201"
)
res = crypto.hash_to_point(data)
res_p = crypto.encodepoint(res)
self.assertEqual(
res_p,
unhexlify(
b"54863a0464c008acc99cffb179bc6cf34eb1bbdf6c29f7a070a7c6376ae30ab5"
),
)
def test_derivation_to_scalar(self):
derivation = unhexlify(
b"e720a09f2e3a0bbf4e4ba7ad93653bb296885510121f806acb2a5f9168fafa01"
)
scalar = unhexlify(
b"25d08763414c379aa9cf989cdcb3cadd36bd5193b500107d6bf5f921f18e470e"
)
sc_int = crypto.derivation_to_scalar(crypto.decodepoint(derivation), 0)
self.assertEqual(scalar, crypto.encodeint(sc_int))
def test_generate_key_derivation(self):
key_pub = crypto.decodepoint(
unhexlify(
b"7739c95d3298e2f87362dba9e0e0b3980a692ae8e2f16796b0e382098cd6bd83"
)
)
key_priv = crypto.decodeint(
unhexlify(
b"3482fb9735ef879fcae5ec7721b5d3646e155c4fb58d6cc11c732c9c9b76620a"
)
)
deriv_exp = unhexlify(
b"fa188a45a0e4daccc0e6d4f6f6858fd46392104be74183ec0047e7e9f4eaf739"
)
self.assertEqual(
deriv_exp,
crypto.encodepoint(crypto.generate_key_derivation(key_pub, key_priv)),
)
def test_h(self):
H = unhexlify(
b"8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94"
)
self.assertEqual(crypto.encodepoint(crypto.xmr_H()), H)
def test_h_pow(self):
hp = crypto.gen_Hpow(10)
self.assertEqual(crypto.encodepoint(hp[0]), crypto.encodepoint(crypto.xmr_H()))
for i in range(1, 10):
crypto.check_ed25519point(hp[i])
self.assertEqual(
crypto.encodepoint(hp[i]),
crypto.encodepoint(
crypto.scalarmult(crypto.xmr_H(), crypto.sc_init(2 ** i))
),
)
def test_signature(self):
for i in range(10):
priv = crypto.random_scalar()
data = crypto.cn_fast_hash(bytes(bytearray([i])))
c, r, pub = crypto.generate_signature(data, priv)
res = crypto.check_signature(data, c, r, pub)
self.assertEqual(res, 1)
res2 = crypto.check_signature(
data, crypto.sc_add(c, crypto.sc_init(1)), r, pub
)
self.assertEqual(res2, 0)
def test_edhex(self):
inputs = [crypto.q - 2 ** 9, crypto.q - 10, 0, 100, 2 ** 200 + 10] + [
common.rand.randrange(0, crypto.q - 2) for _ in range(20)
]
for x in inputs:
l = crypto.encode_ed25519(x)
d = crypto.decode_ed25519(l)
self.assertEqual(x, d)
def test_modm(self):
inputs = [crypto.l - 2 ** 9, crypto.l - 10, 0, 100, 2 ** 200 + 10] + [
common.rand.randrange(0, crypto.l - 2) for _ in range(20)
]
for x in inputs:
l = crypto.encode_modm(x)
d = crypto.decode_modm(l)
self.assertEqual(x, d)
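    # Editor's note (sketch): encode_ed25519/encode_modm serialize integers
    # as 32-byte little-endian strings; the two round-trip tests above cover
    # values near the field prime q = 2**255 - 19 and the group order l.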
def test_ge25519_double_scalarmult_vartime2(self):
for i in range(10):
ap = crypto.random_scalar()
bp = crypto.random_scalar()
A = crypto.scalarmult_base(ap)
B = crypto.scalarmult_base(bp)
a = crypto.random_scalar()
b = crypto.random_scalar()
R = crypto.ge_double_scalarmult_base_vartime2(a, A, b, B)
R_exp = crypto.point_add(crypto.scalarmult(A, a), crypto.scalarmult(B, b))
self.assertTrue(crypto.point_eq(R, R_exp))
def test_ge25519_double_scalarmult_vartime(self):
for i in range(10):
ap = crypto.random_scalar()
A = crypto.scalarmult_base(ap)
a = crypto.random_scalar()
b = crypto.random_scalar()
R = crypto.ge_double_scalarmult_base_vartime(a, A, b)
R_exp = crypto.point_add(crypto.scalarmult(A, a), crypto.scalarmult_base(b))
self.assertTrue(crypto.point_eq(R, R_exp))
def test_pointadd(self):
a = crypto.random_scalar()
A = crypto.scalarmult_base(a)
A2 = crypto.point_add(A, A)
A3 = crypto.point_add(A2, A)
A4 = crypto.point_add(A3, A)
A8 = crypto.scalarmult(A4, crypto.sc_init(2))
A8p = crypto.point_mul8(A)
self.assertTrue(crypto.point_eq(A8p, A8))
self.assertTrue(crypto.point_eq(A4, crypto.scalarmult(A, crypto.sc_init(4))))
self.assertTrue(crypto.point_eq(A3, crypto.scalarmult(A, crypto.sc_init(3))))
def test_sc_inversion(self):
res = crypto.new_scalar()
inp = crypto.decodeint(
unhexlify(
b"3482fb9735ef879fcae5ec7721b5d3646e155c4fb58d6cc11c732c9c9b76620a"
)
)
crypto.sc_inv_into(res, inp)
self.assertEqual(
binascii.hexlify(crypto.encodeint(res)),
b"bcf365a551e6358f3f281a6241d4a25eded60230b60a1d48c67b51a85e33d70e",
)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 35.280303
| 88
| 0.613915
|
import binascii
from binascii import unhexlify
import unittest
import aiounittest
from monero_glue.xmr import common, crypto
from monero_glue.xmr.core import ec_py
class CryptoTest(aiounittest.AsyncTestCase):
def __init__(self, *args, **kwargs):
super(CryptoTest, self).__init__(*args, **kwargs)
def test_ed_crypto(self):
sqr = ec_py.fe_expmod(ec_py.py_fe_sqrtm1, 2)
self.assertEqual(sqr, ec_py.fe_mod(-1))
self.assertEqual(
ec_py.py_fe_A, ec_py.fe_mod(2 * (1 - ec_py.d) * ec_py.inv(1 + ec_py.py_d))
)
self.assertEqual(
ec_py.fe_expmod(ec_py.py_fe_fffb1, 2),
ec_py.fe_mod(-2 * ec_py.py_fe_A * (ec_py.py_fe_A + 2)),
)
self.assertEqual(
ec_py.fe_expmod(ec_py.py_fe_fffb2, 2),
ec_py.fe_mod(2 * ec_py.py_fe_A * (ec_py.py_fe_A + 2)),
)
self.assertEqual(
ec_py.fe_expmod(ec_py.py_fe_fffb3, 2),
ec_py.fe_mod(-ec_py.py_fe_sqrtm1 * ec_py.py_fe_A * (ec_py.py_fe_A + 2)),
)
self.assertEqual(
ec_py.fe_expmod(ec_py.py_fe_fffb4, 2),
ec_py.fe_mod(ec_py.py_fe_sqrtm1 * ec_py.py_fe_A * (ec_py.py_fe_A + 2)),
)
def test_encoding(self):
point = unhexlify(
b"2486224797d05cae3cba4be043be2db0df381f3f19cfa113f86ab38e3d8d2bd0"
)
self.assertEqual(point, crypto.encodepoint(crypto.decodepoint(point)))
self.assertTrue(
crypto.point_eq(
crypto.decodepoint(point),
crypto.decodepoint(crypto.encodepoint(crypto.decodepoint(point))),
)
)
def test_scalarmult_base(self):
scalar = crypto.decodeint(
unhexlify(
b"a0eea49140a3b036da30eacf64bd9d56ce3ef68ba82ef13571ec511edbcf8303"
)
)
exp = unhexlify(
b"16bb4a3c44e2ced511fc0d4cd86b13b3af21efc99fb0356199fac489f2544c09"
)
res = crypto.scalarmult_base(scalar)
self.assertEqual(exp, crypto.encodepoint(res))
self.assertTrue(crypto.point_eq(crypto.decodepoint(exp), res))
scalar = crypto.decodeint(
unhexlify(
b"fd290dce39f781aebbdbd24584ed6d48bd300de19d9c3decfda0a6e2c6751d0f"
)
)
exp = unhexlify(
b"123daf90fc26f13c6529e6b49bfed498995ac383ef19c0db6771143f24ba8dd5"
)
res = crypto.scalarmult_base(scalar)
self.assertEqual(exp, crypto.encodepoint(res))
self.assertTrue(crypto.point_eq(crypto.decodepoint(exp), res))
def test_scalarmult(self):
priv = unhexlify(
b"3482fb9735ef879fcae5ec7721b5d3646e155c4fb58d6cc11c732c9c9b76620a"
)
pub = unhexlify(
b"2486224797d05cae3cba4be043be2db0df381f3f19cfa113f86ab38e3d8d2bd0"
)
exp = unhexlify(
b"adcd1f5881f46f254900a03c654e71950a88a0236fa0a3a946c9b8daed6ef43d"
)
res = crypto.scalarmult(crypto.decodepoint(pub), crypto.decodeint(priv))
self.assertEqual(exp, crypto.encodepoint(res))
self.assertTrue(crypto.point_eq(crypto.decodepoint(exp), res))
def test_cn_fast_hash(self):
inp = unhexlify(
b"259ef2aba8feb473cf39058a0fe30b9ff6d245b42b6826687ebd6b63128aff6405"
)
res = crypto.cn_fast_hash(inp)
self.assertEqual(
res,
unhexlify(
b"86db87b83fb1246efca5f3b0db09ce3fa4d605b0d10e6507cac253dd31a3ec16"
),
)
def test_hash_to_scalar(self):
inp = unhexlify(
b"259ef2aba8feb473cf39058a0fe30b9ff6d245b42b6826687ebd6b63128aff6405"
)
res = crypto.hash_to_scalar(inp)
exp = crypto.decodeint(binascii.unhexlify(
b"9907925b254e12162609fc0dfd0fef2aa4d605b0d10e6507cac253dd31a3ec06"))
self.assertTrue(crypto.sc_eq(res, exp))
def test_hash_to_point(self):
data = unhexlify(
b"42f6835bf83114a1f5f6076fe79bdfa0bd67c74b88f127d54572d3910dd09201"
)
res = crypto.hash_to_point(data)
res_p = crypto.encodepoint(res)
self.assertEqual(
res_p,
unhexlify(
b"54863a0464c008acc99cffb179bc6cf34eb1bbdf6c29f7a070a7c6376ae30ab5"
),
)
def test_derivation_to_scalar(self):
derivation = unhexlify(
b"e720a09f2e3a0bbf4e4ba7ad93653bb296885510121f806acb2a5f9168fafa01"
)
scalar = unhexlify(
b"25d08763414c379aa9cf989cdcb3cadd36bd5193b500107d6bf5f921f18e470e"
)
sc_int = crypto.derivation_to_scalar(crypto.decodepoint(derivation), 0)
self.assertEqual(scalar, crypto.encodeint(sc_int))
def test_generate_key_derivation(self):
key_pub = crypto.decodepoint(
unhexlify(
b"7739c95d3298e2f87362dba9e0e0b3980a692ae8e2f16796b0e382098cd6bd83"
)
)
key_priv = crypto.decodeint(
unhexlify(
b"3482fb9735ef879fcae5ec7721b5d3646e155c4fb58d6cc11c732c9c9b76620a"
)
)
deriv_exp = unhexlify(
b"fa188a45a0e4daccc0e6d4f6f6858fd46392104be74183ec0047e7e9f4eaf739"
)
self.assertEqual(
deriv_exp,
crypto.encodepoint(crypto.generate_key_derivation(key_pub, key_priv)),
)
def test_h(self):
H = unhexlify(
b"8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94"
)
self.assertEqual(crypto.encodepoint(crypto.xmr_H()), H)
def test_h_pow(self):
hp = crypto.gen_Hpow(10)
self.assertEqual(crypto.encodepoint(hp[0]), crypto.encodepoint(crypto.xmr_H()))
for i in range(1, 10):
crypto.check_ed25519point(hp[i])
self.assertEqual(
crypto.encodepoint(hp[i]),
crypto.encodepoint(
crypto.scalarmult(crypto.xmr_H(), crypto.sc_init(2 ** i))
),
)
def test_signature(self):
for i in range(10):
priv = crypto.random_scalar()
data = crypto.cn_fast_hash(bytes(bytearray([i])))
c, r, pub = crypto.generate_signature(data, priv)
res = crypto.check_signature(data, c, r, pub)
self.assertEqual(res, 1)
res2 = crypto.check_signature(
data, crypto.sc_add(c, crypto.sc_init(1)), r, pub
)
self.assertEqual(res2, 0)
def test_edhex(self):
inputs = [crypto.q - 2 ** 9, crypto.q - 10, 0, 100, 2 ** 200 + 10] + [
common.rand.randrange(0, crypto.q - 2) for _ in range(20)
]
for x in inputs:
l = crypto.encode_ed25519(x)
d = crypto.decode_ed25519(l)
self.assertEqual(x, d)
def test_modm(self):
inputs = [crypto.l - 2 ** 9, crypto.l - 10, 0, 100, 2 ** 200 + 10] + [
common.rand.randrange(0, crypto.l - 2) for _ in range(20)
]
for x in inputs:
l = crypto.encode_modm(x)
d = crypto.decode_modm(l)
self.assertEqual(x, d)
def test_ge25519_double_scalarmult_vartime2(self):
for i in range(10):
ap = crypto.random_scalar()
bp = crypto.random_scalar()
A = crypto.scalarmult_base(ap)
B = crypto.scalarmult_base(bp)
a = crypto.random_scalar()
b = crypto.random_scalar()
R = crypto.ge_double_scalarmult_base_vartime2(a, A, b, B)
R_exp = crypto.point_add(crypto.scalarmult(A, a), crypto.scalarmult(B, b))
self.assertTrue(crypto.point_eq(R, R_exp))
def test_ge25519_double_scalarmult_vartime(self):
for i in range(10):
ap = crypto.random_scalar()
A = crypto.scalarmult_base(ap)
a = crypto.random_scalar()
b = crypto.random_scalar()
R = crypto.ge_double_scalarmult_base_vartime(a, A, b)
R_exp = crypto.point_add(crypto.scalarmult(A, a), crypto.scalarmult_base(b))
self.assertTrue(crypto.point_eq(R, R_exp))
def test_pointadd(self):
a = crypto.random_scalar()
A = crypto.scalarmult_base(a)
A2 = crypto.point_add(A, A)
A3 = crypto.point_add(A2, A)
A4 = crypto.point_add(A3, A)
A8 = crypto.scalarmult(A4, crypto.sc_init(2))
A8p = crypto.point_mul8(A)
self.assertTrue(crypto.point_eq(A8p, A8))
self.assertTrue(crypto.point_eq(A4, crypto.scalarmult(A, crypto.sc_init(4))))
self.assertTrue(crypto.point_eq(A3, crypto.scalarmult(A, crypto.sc_init(3))))
def test_sc_inversion(self):
res = crypto.new_scalar()
inp = crypto.decodeint(
unhexlify(
b"3482fb9735ef879fcae5ec7721b5d3646e155c4fb58d6cc11c732c9c9b76620a"
)
)
crypto.sc_inv_into(res, inp)
self.assertEqual(
binascii.hexlify(crypto.encodeint(res)),
b"bcf365a551e6358f3f281a6241d4a25eded60230b60a1d48c67b51a85e33d70e",
)
if __name__ == "__main__":
unittest.main()
| true
| true
|
f7015b1b5a99de73f33b4f3d76ad8353923e1995
| 175,051
|
py
|
Python
|
python/ccxt/async_support/gateio.py
|
ttodua/ccxt-2
|
d23db0c44288b3fc0313e798912a1c52c5b42f97
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/gateio.py
|
ttodua/ccxt-2
|
d23db0c44288b3fc0313e798912a1c52c5b42f97
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/gateio.py
|
ttodua/ccxt-2
|
d23db0c44288b3fc0313e798912a1c52c5b42f97
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountNotEnabled
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class gateio(Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': ['KR'],
'rateLimit': 10 / 3, # 300 requests per second or 3.33ms
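            # Editor's note (sketch): per-endpoint costs below are multiplied
            # by rateLimit, e.g. the private withdrawal POST cost of 3000
            # means 3000 * 10/3 ms = 10 seconds between calls.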
'version': 'v4',
'certified': True,
'pro': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'doc': 'https://www.gate.io/docs/apiv4/en/index.html',
'www': 'https://gate.io/',
'api': {
'public': {
'wallet': 'https://api.gateio.ws/api/v4',
'futures': 'https://api.gateio.ws/api/v4',
'margin': 'https://api.gateio.ws/api/v4',
'delivery': 'https://api.gateio.ws/api/v4',
'spot': 'https://api.gateio.ws/api/v4',
'options': 'https://api.gateio.ws/api/v4',
},
'private': {
'withdrawals': 'https://api.gateio.ws/api/v4',
'wallet': 'https://api.gateio.ws/api/v4',
'futures': 'https://api.gateio.ws/api/v4',
'margin': 'https://api.gateio.ws/api/v4',
'delivery': 'https://api.gateio.ws/api/v4',
'spot': 'https://api.gateio.ws/api/v4',
'options': 'https://api.gateio.ws/api/v4',
},
},
'test': {
'public': {
'futures': 'https://fx-api-testnet.gateio.ws/api/v4',
'delivery': 'https://fx-api-testnet.gateio.ws/api/v4',
},
'private': {
'futures': 'https://fx-api-testnet.gateio.ws/api/v4',
'delivery': 'https://fx-api-testnet.gateio.ws/api/v4',
},
},
'referral': {
'url': 'https://www.gate.io/ref/2436035',
'discount': 0.2,
},
},
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': None,
'cancelAllOrders': True,
'cancelOrder': True,
'createMarketOrder': False,
'createOrder': True,
'createPostOnlyOrder': True,
'createStopLimitOrder': True,
'createStopMarketOrder': False,
'createStopOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchLeverage': False,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': 'emulated',
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchNetworkDepositAddress': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': False,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchTransactionFees': True,
'fetchWithdrawals': True,
'setLeverage': True,
'setMarginMode': False,
'transfer': True,
'withdraw': True,
},
'api': {
'public': {
'wallet': {
'get': {
'wallet/currency_chains': 1.5,
},
},
'spot': {
'get': {
'currencies': 1,
'currencies/{currency}': 1,
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'tickers': 1,
'order_book': 1,
'trades': 1,
'candlesticks': 1,
},
},
'margin': {
'get': {
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'cross/currencies': 1,
'cross/currencies/{currency}': 1,
'funding_book': 1,
},
},
'futures': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/funding_rate': 1.5,
'{settle}/insurance': 1.5,
'{settle}/contract_stats': 1.5,
'{settle}/liq_orders': 1.5,
},
},
'delivery': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/insurance': 1.5,
},
},
'options': {
'get': {
'underlyings': 1.5,
'expirations': 1.5,
'contracts': 1.5,
'contracts/{contract}': 1.5,
'settlements': 1.5,
'settlements/{contract}': 1.5,
'order_book': 1.5,
'tickers': 1.5,
'underlying/tickers/{underlying}': 1.5,
'candlesticks': 1.5,
'underlying/candlesticks': 1.5,
'trades': 1.5,
},
},
},
'private': {
'withdrawals': {
'post': {
'': 3000, # 3000 = 10 seconds
},
'delete': {
'{withdrawal_id}': 300,
},
},
'wallet': {
'get': {
'deposit_address': 300,
'withdrawals': 300,
'deposits': 300,
'sub_account_transfers': 300,
'withdraw_status': 300,
'sub_account_balances': 300,
'fee': 300,
'total_balance': 300,
},
'post': {
'transfers': 300,
'sub_account_transfers': 300,
},
},
'spot': {
'get': {
'accounts': 1,
'open_orders': 1,
'orders': 1,
'orders/{order_id}': 1,
'my_trades': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
'post': {
'batch_orders': 1,
'orders': 1,
'cancel_batch_orders': 1,
'price_orders': 1,
},
'delete': {
'orders': 1,
'orders/{order_id}': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
},
'margin': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'funding_accounts': 1.5,
'loans': 1.5,
'loans/{loan_id}': 1.5,
'loans/{loan_id}/repayment': 1.5,
'loan_records': 1.5,
'loan_records/{load_record_id}': 1.5,
'auto_repay': 1.5,
'transferable': 1.5,
'cross/accounts': 1.5,
'cross/account_book': 1.5,
'cross/loans': 1.5,
'cross/loans/{loan_id}': 1.5,
'cross/loans/repayments': 1.5,
'cross/transferable': 1.5,
'loan_records/{loan_record_id}': 1.5,
'borrowable': 1.5,
'cross/repayments': 1.5,
'cross/borrowable': 1.5,
},
'post': {
'loans': 1.5,
'merged_loans': 1.5,
'loans/{loan_id}/repayment': 1.5,
'auto_repay': 1.5,
'cross/loans': 1.5,
'cross/loans/repayments': 1.5,
'cross/repayments': 1.5,
},
'patch': {
'loans/{loan_id}': 1.5,
'loan_records/{loan_record_id}': 1.5,
},
'delete': {
'loans/{loan_id}': 1.5,
},
},
'futures': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/dual_mode': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
'{settle}/dual_comp/positions/{contract}/margin': 1.5,
'{settle}/dual_comp/positions/{contract}/leverage': 1.5,
'{settle}/dual_comp/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'delivery': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
'{settle}/settlements': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'options': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'positions': 1.5,
'positions/{contract}': 1.5,
'position_close': 1.5,
'orders': 1.5,
'orders/{order_id}': 1.5,
'my_trades': 1.5,
},
'post': {
'orders': 1.5,
},
'delete': {
'orders': 1.5,
'orders/{order_id}': 1.5,
},
},
},
},
'timeframes': {
'10s': '10s',
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'8h': '8h',
'1d': '1d',
'7d': '7d',
'1w': '7d',
},
# copied from gateiov2
'commonCurrencies': {
'88MPH': 'MPH',
'AXIS': 'Axis DeFi',
'BIFI': 'Bitcoin File',
'BOX': 'DefiBox',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'BeyondFi',
'EGG': 'Goose Finance',
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'GTC_HT': 'Game.com HT',
'GTC_BSC': 'Game.com BSC',
'HIT': 'HitChain',
'MM': 'Million', # conflict with MilliMeter
'MPH': 'Morpher', # conflict with 88MPH
'RAI': 'Rai Reflex Index', # conflict with RAI Finance
'SBTC': 'Super Bitcoin',
'TNC': 'Trinity Network Credit',
'TON': 'TONToken',
'VAI': 'VAIOT',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'headers': {
'X-Gate-Channel-Id': 'ccxt',
},
'options': {
'createOrder': {
'expiration': 86400, # for conditional orders
},
'networks': {
'TRC20': 'TRX',
'ERC20': 'ETH',
'BEP20': 'BSC',
},
'accountsByType': {
'funding': 'spot',
'spot': 'spot',
'margin': 'margin',
'cross_margin': 'cross_margin',
'cross': 'cross_margin',
'isolated': 'margin',
'swap': 'futures',
'future': 'delivery',
'futures': 'futures',
'delivery': 'delivery',
},
'defaultType': 'spot',
'swap': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
'future': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': True,
'feeSide': 'get',
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
'tiers': {
# volume is in BTC
'maker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00185')],
[self.parse_number('3'), self.parse_number('0.00175')],
[self.parse_number('6'), self.parse_number('0.00165')],
[self.parse_number('12.5'), self.parse_number('0.00155')],
[self.parse_number('25'), self.parse_number('0.00145')],
[self.parse_number('75'), self.parse_number('0.00135')],
[self.parse_number('200'), self.parse_number('0.00125')],
[self.parse_number('500'), self.parse_number('0.00115')],
[self.parse_number('1250'), self.parse_number('0.00105')],
[self.parse_number('2500'), self.parse_number('0.00095')],
[self.parse_number('3000'), self.parse_number('0.00085')],
[self.parse_number('6000'), self.parse_number('0.00075')],
[self.parse_number('11000'), self.parse_number('0.00065')],
[self.parse_number('20000'), self.parse_number('0.00055')],
[self.parse_number('40000'), self.parse_number('0.00055')],
[self.parse_number('75000'), self.parse_number('0.00055')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00195')],
[self.parse_number('3'), self.parse_number('0.00185')],
[self.parse_number('6'), self.parse_number('0.00175')],
[self.parse_number('12.5'), self.parse_number('0.00165')],
[self.parse_number('25'), self.parse_number('0.00155')],
[self.parse_number('75'), self.parse_number('0.00145')],
[self.parse_number('200'), self.parse_number('0.00135')],
[self.parse_number('500'), self.parse_number('0.00125')],
[self.parse_number('1250'), self.parse_number('0.00115')],
[self.parse_number('2500'), self.parse_number('0.00105')],
[self.parse_number('3000'), self.parse_number('0.00095')],
[self.parse_number('6000'), self.parse_number('0.00085')],
[self.parse_number('11000'), self.parse_number('0.00075')],
[self.parse_number('20000'), self.parse_number('0.00065')],
[self.parse_number('40000'), self.parse_number('0.00065')],
[self.parse_number('75000'), self.parse_number('0.00065')],
],
},
},
'swap': {
'tierBased': True,
'feeSide': 'base',
'percentage': True,
'maker': self.parse_number('0.0'),
'taker': self.parse_number('0.0005'),
'tiers': {
'maker': [
[self.parse_number('0'), self.parse_number('0.0000')],
[self.parse_number('1.5'), self.parse_number('-0.00005')],
[self.parse_number('3'), self.parse_number('-0.00005')],
[self.parse_number('6'), self.parse_number('-0.00005')],
[self.parse_number('12.5'), self.parse_number('-0.00005')],
[self.parse_number('25'), self.parse_number('-0.00005')],
[self.parse_number('75'), self.parse_number('-0.00005')],
[self.parse_number('200'), self.parse_number('-0.00005')],
[self.parse_number('500'), self.parse_number('-0.00005')],
[self.parse_number('1250'), self.parse_number('-0.00005')],
[self.parse_number('2500'), self.parse_number('-0.00005')],
[self.parse_number('3000'), self.parse_number('-0.00008')],
[self.parse_number('6000'), self.parse_number('-0.01000')],
[self.parse_number('11000'), self.parse_number('-0.01002')],
[self.parse_number('20000'), self.parse_number('-0.01005')],
[self.parse_number('40000'), self.parse_number('-0.02000')],
[self.parse_number('75000'), self.parse_number('-0.02005')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.00050')],
[self.parse_number('1.5'), self.parse_number('0.00048')],
[self.parse_number('3'), self.parse_number('0.00046')],
[self.parse_number('6'), self.parse_number('0.00044')],
[self.parse_number('12.5'), self.parse_number('0.00042')],
[self.parse_number('25'), self.parse_number('0.00040')],
[self.parse_number('75'), self.parse_number('0.00038')],
[self.parse_number('200'), self.parse_number('0.00036')],
[self.parse_number('500'), self.parse_number('0.00034')],
[self.parse_number('1250'), self.parse_number('0.00032')],
[self.parse_number('2500'), self.parse_number('0.00030')],
[self.parse_number('3000'), self.parse_number('0.00030')],
[self.parse_number('6000'), self.parse_number('0.00030')],
[self.parse_number('11000'), self.parse_number('0.00030')],
[self.parse_number('20000'), self.parse_number('0.00030')],
[self.parse_number('40000'), self.parse_number('0.00030')],
[self.parse_number('75000'), self.parse_number('0.00030')],
],
},
},
},
# https://www.gate.io/docs/apiv4/en/index.html#label-list
'exceptions': {
'exact': {
'INVALID_PARAM_VALUE': BadRequest,
'INVALID_PROTOCOL': BadRequest,
'INVALID_ARGUMENT': BadRequest,
'INVALID_REQUEST_BODY': BadRequest,
'MISSING_REQUIRED_PARAM': ArgumentsRequired,
'BAD_REQUEST': BadRequest,
'INVALID_CONTENT_TYPE': BadRequest,
'NOT_ACCEPTABLE': BadRequest,
'METHOD_NOT_ALLOWED': BadRequest,
'NOT_FOUND': ExchangeError,
'INVALID_CREDENTIALS': AuthenticationError,
'INVALID_KEY': AuthenticationError,
'IP_FORBIDDEN': AuthenticationError,
'READ_ONLY': PermissionDenied,
'INVALID_SIGNATURE': AuthenticationError,
'MISSING_REQUIRED_HEADER': AuthenticationError,
'REQUEST_EXPIRED': AuthenticationError,
'ACCOUNT_LOCKED': AccountSuspended,
'FORBIDDEN': PermissionDenied,
'SUB_ACCOUNT_NOT_FOUND': ExchangeError,
'SUB_ACCOUNT_LOCKED': AccountSuspended,
'MARGIN_BALANCE_EXCEPTION': ExchangeError,
'MARGIN_TRANSFER_FAILED': ExchangeError,
'TOO_MUCH_FUTURES_AVAILABLE': ExchangeError,
'FUTURES_BALANCE_NOT_ENOUGH': InsufficientFunds,
'ACCOUNT_EXCEPTION': ExchangeError,
'SUB_ACCOUNT_TRANSFER_FAILED': ExchangeError,
'ADDRESS_NOT_USED': ExchangeError,
'TOO_FAST': RateLimitExceeded,
'WITHDRAWAL_OVER_LIMIT': ExchangeError,
'API_WITHDRAW_DISABLED': ExchangeNotAvailable,
'INVALID_WITHDRAW_ID': ExchangeError,
'INVALID_WITHDRAW_CANCEL_STATUS': ExchangeError,
'INVALID_PRECISION': InvalidOrder,
'INVALID_CURRENCY': BadSymbol,
'INVALID_CURRENCY_PAIR': BadSymbol,
'POC_FILL_IMMEDIATELY': ExchangeError,
'ORDER_NOT_FOUND': OrderNotFound,
'CLIENT_ID_NOT_FOUND': OrderNotFound,
'ORDER_CLOSED': InvalidOrder,
'ORDER_CANCELLED': InvalidOrder,
'QUANTITY_NOT_ENOUGH': InvalidOrder,
'BALANCE_NOT_ENOUGH': InsufficientFunds,
'MARGIN_NOT_SUPPORTED': InvalidOrder,
'MARGIN_BALANCE_NOT_ENOUGH': InsufficientFunds,
'AMOUNT_TOO_LITTLE': InvalidOrder,
'AMOUNT_TOO_MUCH': InvalidOrder,
'REPEATED_CREATION': InvalidOrder,
'LOAN_NOT_FOUND': OrderNotFound,
'LOAN_RECORD_NOT_FOUND': OrderNotFound,
'NO_MATCHED_LOAN': ExchangeError,
'NOT_MERGEABLE': ExchangeError,
'NO_CHANGE': ExchangeError,
'REPAY_TOO_MUCH': ExchangeError,
'TOO_MANY_CURRENCY_PAIRS': InvalidOrder,
'TOO_MANY_ORDERS': InvalidOrder,
'MIXED_ACCOUNT_TYPE': InvalidOrder,
'AUTO_BORROW_TOO_MUCH': ExchangeError,
'TRADE_RESTRICTED': InsufficientFunds,
'USER_NOT_FOUND': AccountNotEnabled,
'CONTRACT_NO_COUNTER': ExchangeError,
'CONTRACT_NOT_FOUND': BadSymbol,
'RISK_LIMIT_EXCEEDED': ExchangeError,
'INSUFFICIENT_AVAILABLE': InsufficientFunds,
'LIQUIDATE_IMMEDIATELY': InvalidOrder,
'LEVERAGE_TOO_HIGH': InvalidOrder,
'LEVERAGE_TOO_LOW': InvalidOrder,
'ORDER_NOT_OWNED': ExchangeError,
'ORDER_FINISHED': ExchangeError,
'POSITION_CROSS_MARGIN': ExchangeError,
'POSITION_IN_LIQUIDATION': ExchangeError,
'POSITION_IN_CLOSE': ExchangeError,
'POSITION_EMPTY': InvalidOrder,
'REMOVE_TOO_MUCH': ExchangeError,
'RISK_LIMIT_NOT_MULTIPLE': ExchangeError,
'RISK_LIMIT_TOO_HIGH': ExchangeError,
                    'RISK_LIMIT_TOO_lOW': ExchangeError,  # lowercase 'l' kept as-is to mirror the label string returned by the exchange
'PRICE_TOO_DEVIATED': InvalidOrder,
'SIZE_TOO_LARGE': InvalidOrder,
'SIZE_TOO_SMALL': InvalidOrder,
'PRICE_OVER_LIQUIDATION': InvalidOrder,
'PRICE_OVER_BANKRUPT': InvalidOrder,
'ORDER_POC_IMMEDIATE': InvalidOrder,
'INCREASE_POSITION': InvalidOrder,
'CONTRACT_IN_DELISTING': ExchangeError,
'INTERNAL': ExchangeNotAvailable,
'SERVER_ERROR': ExchangeNotAvailable,
'TOO_BUSY': ExchangeNotAvailable,
'CROSS_ACCOUNT_NOT_FOUND': ExchangeError,
},
},
'broad': {},
})
async def fetch_markets(self, params={}):
result = []
type, query = self.handle_market_type_and_params('fetchMarkets', None, params)
if type == 'spot' or type == 'margin':
result = await self.fetch_spot_markets(query)
if type == 'swap' or type == 'future':
result = await self.fetch_contract_markets(query) # futures and swaps
if type == 'option':
result = await self.fetch_option_markets(query)
resultLength = len(result)
if resultLength == 0:
raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to " + "'spot', 'margin', 'swap', 'future' or 'option'") # eslint-disable-line quotes
return result
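    # A minimal usage sketch (illustrative only, assuming a configured instance named `exchange`):
    #
    #     exchange.options['defaultType'] = 'swap'
    #     markets = await exchange.fetch_markets()  # contract markets only
    #     markets = await exchange.fetch_markets({'type': 'spot'})  # per-call override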
async def fetch_spot_markets(self, params):
marginResponse = await self.publicMarginGetCurrencyPairs(params)
spotMarketsResponse = await self.publicSpotGetCurrencyPairs(params)
marginMarkets = self.index_by(marginResponse, 'id')
#
# Spot
#
# [
# {
# "id": "QTUM_ETH",
# "base": "QTUM",
# "quote": "ETH",
# "fee": "0.2",
# "min_base_amount": "0.01",
# "min_quote_amount": "0.001",
# "amount_precision": 3,
# "precision": 6,
# "trade_status": "tradable",
# "sell_start": 0,
# "buy_start": 0
# }
# ]
#
# Margin
#
# [
# {
# "id": "ETH_USDT",
# "base": "ETH",
# "quote": "USDT",
# "leverage": 3,
# "min_base_amount": "0.01",
# "min_quote_amount": "100",
# "max_quote_amount": "1000000"
# }
# ]
#
result = []
for i in range(0, len(spotMarketsResponse)):
spotMarket = spotMarketsResponse[i]
id = self.safe_string(spotMarket, 'id')
marginMarket = self.safe_value(marginMarkets, id)
market = self.deep_extend(marginMarket, spotMarket)
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
takerPercent = self.safe_string(market, 'fee')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
amountPrecisionString = self.safe_string(market, 'amount_precision')
pricePrecisionString = self.safe_string(market, 'precision')
tradeStatus = self.safe_string(market, 'trade_status')
leverage = self.safe_number(market, 'leverage')
defaultMinAmountLimit = self.parse_number(self.parse_precision(amountPrecisionString))
margin = leverage is not None
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': margin,
'swap': False,
'future': False,
'option': False,
'active': (tradeStatus == 'tradable'),
'contract': False,
'linear': None,
'inverse': None,
# Fee is in %, so divide by 100
'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(amountPrecisionString)),
'price': self.parse_number(self.parse_precision(pricePrecisionString)),
},
'limits': {
'leverage': {
'min': self.parse_number('1'),
'max': self.safe_number(market, 'leverage', 1),
},
'amount': {
'min': self.safe_number(spotMarket, 'min_base_amount', defaultMinAmountLimit),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_quote_amount'),
'max': self.safe_number(market, 'max_quote_amount'),
},
},
'info': market,
})
return result
async def fetch_contract_markets(self, params):
result = []
swapSettlementCurrencies = self.get_settlement_currencies('swap', 'fetchMarkets')
futureSettlementCurrencies = self.get_settlement_currencies('future', 'fetchMarkets')
for c in range(0, len(swapSettlementCurrencies)):
settleId = swapSettlementCurrencies[c]
query = params
query['settle'] = settleId
response = await self.publicFuturesGetSettleContracts(query)
for i in range(0, len(response)):
parsedMarket = self.parse_contract_market(response[i], settleId)
result.append(parsedMarket)
for c in range(0, len(futureSettlementCurrencies)):
settleId = futureSettlementCurrencies[c]
query = params
query['settle'] = settleId
response = await self.publicDeliveryGetSettleContracts(query)
for i in range(0, len(response)):
parsedMarket = self.parse_contract_market(response[i], settleId)
result.append(parsedMarket)
return result
def parse_contract_market(self, market, settleId):
#
# Perpetual swap
#
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
#
# Delivery Futures
#
# {
# "name": "BTC_USDT_20200814",
# "underlying": "BTC_USDT",
# "cycle": "WEEKLY",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "mark_type": "index",
# "last_price": "9017",
# "mark_price": "9019",
# "index_price": "9005.3",
# "basis_rate": "0.185095",
# "basis_value": "13.7",
# "basis_impact_value": "100000",
# "settle_price": "0",
# "settle_price_interval": 60,
# "settle_price_duration": 1800,
# "settle_fee_rate": "0.0015",
# "expire_time": 1593763200,
# "order_price_round": "0.1",
# "mark_price_round": "0.1",
# "leverage_min": "1",
# "leverage_max": "100",
# "maintenance_rate": "1000000",
# "risk_limit_base": "140.726652109199",
# "risk_limit_step": "1000000",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "ref_discount_rate": "0",
# "ref_rebate_rate": "0.2",
# "order_price_deviate": "0.5",
# "order_size_min": 1,
# "order_size_max": 1000000,
# "orders_limit": 50,
# "orderbook_id": 63,
# "trade_id": 26,
# "trade_size": 435,
# "position_size": 130,
# "config_change_time": 1593158867,
# "in_delisting": False
# }
#
id = self.safe_string(market, 'name')
parts = id.split('_')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
date = self.safe_string(parts, 2)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
expiry = self.safe_timestamp(market, 'expire_time')
symbol = ''
marketType = 'swap'
if date is not None:
symbol = base + '/' + quote + ':' + settle + '-' + self.yymmdd(expiry, '')
marketType = 'future'
else:
symbol = base + '/' + quote + ':' + settle
priceDeviate = self.safe_string(market, 'order_price_deviate')
markPrice = self.safe_string(market, 'mark_price')
minMultiplier = Precise.string_sub('1', priceDeviate)
maxMultiplier = Precise.string_add('1', priceDeviate)
minPrice = Precise.string_mul(minMultiplier, markPrice)
maxPrice = Precise.string_mul(maxMultiplier, markPrice)
takerPercent = self.safe_string(market, 'taker_fee_rate')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
isLinear = quote == settle
return {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': marketType,
'spot': False,
'margin': False,
'swap': marketType == 'swap',
'future': marketType == 'future',
'option': marketType == 'option',
'active': True,
'contract': True,
'linear': isLinear,
'inverse': not isLinear,
'taker': self.parse_number(Precise.string_div(takerPercent, '100')), # Fee is in %, so divide by 100
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': self.safe_number(market, 'quanto_multiplier'),
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number('1'),
'price': self.safe_number(market, 'order_price_round'),
},
'limits': {
'leverage': {
'min': self.safe_number(market, 'leverage_min'),
'max': self.safe_number(market, 'leverage_max'),
},
'amount': {
'min': self.safe_number(market, 'order_size_min'),
'max': self.safe_number(market, 'order_size_max'),
},
'price': {
'min': self.parse_number(minPrice),
'max': self.parse_number(maxPrice),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
}
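    # Symbol construction sketch for the two contract types (values illustrative):
    #
    #     swap:   "BTC_USDT"           -> "BTC/USDT:USDT"
    #     future: "BTC_USDT_20200814"  -> "BTC/USDT:USDT-200814"  # suffix is yymmdd of expire_time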
async def fetch_option_markets(self, params={}):
result = []
underlyings = await self.fetch_option_underlyings()
for i in range(0, len(underlyings)):
underlying = underlyings[i]
query = params
query['underlying'] = underlying
response = await self.publicOptionsGetContracts(query)
#
# [
# {
# "orders_limit": "50",
# "order_size_max": "100000",
# "mark_price_round": "0.1",
# "order_size_min": "1",
# "position_limit": "1000000",
# "orderbook_id": "575967",
# "order_price_deviate": "0.9",
            #             "is_call": True,  # True means Call, False means Put
# "last_price": "93.9",
# "bid1_size": "0",
# "bid1_price": "0",
# "taker_fee_rate": "0.0004",
# "underlying": "BTC_USDT",
# "create_time": "1646381188",
# "price_limit_fee_rate": "0.1",
# "maker_fee_rate": "0.0004",
# "trade_id": "727",
# "order_price_round": "0.1",
# "settle_fee_rate": "0.0001",
# "trade_size": "1982",
# "ref_rebate_rate": "0",
# "name": "BTC_USDT-20220311-44000-C",
# "underlying_price": "39194.26",
# "strike_price": "44000",
# "multiplier": "0.0001",
# "ask1_price": "0",
# "ref_discount_rate": "0",
# "expiration_time": "1646985600",
# "mark_price": "12.15",
# "position_size": "4",
# "ask1_size": "0",
# "tag": "WEEK"
# }
# ]
#
            for j in range(0, len(response)):
                market = response[j]
id = self.safe_string(market, 'name')
parts = underlying.split('_')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
expiry = self.safe_timestamp(market, 'expiration_time')
strike = self.safe_string(market, 'strike_price')
isCall = self.safe_value(market, 'is_call')
optionLetter = 'C' if isCall else 'P'
optionType = 'call' if isCall else 'put'
symbol = symbol + ':' + quote + '-' + self.yymmdd(expiry) + ':' + strike + ':' + optionLetter
priceDeviate = self.safe_string(market, 'order_price_deviate')
markPrice = self.safe_string(market, 'mark_price')
minMultiplier = Precise.string_sub('1', priceDeviate)
maxMultiplier = Precise.string_add('1', priceDeviate)
minPrice = Precise.string_mul(minMultiplier, markPrice)
maxPrice = Precise.string_mul(maxMultiplier, markPrice)
takerPercent = self.safe_string(market, 'taker_fee_rate')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': quote,
'baseId': baseId,
'quoteId': quoteId,
'settleId': quoteId,
'type': 'option',
'spot': False,
'margin': False,
'swap': False,
'future': False,
'option': True,
'active': True,
'contract': True,
'linear': True,
'inverse': False,
'taker': self.parse_number(Precise.string_div(takerPercent, '100')), # Fee is in %, so divide by 100
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': self.parse_number('1'),
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': strike,
'optionType': optionType,
'precision': {
'amount': self.parse_number('1'),
'price': self.safe_number(market, 'order_price_round'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'order_size_min'),
'max': self.safe_number(market, 'order_size_max'),
},
'price': {
'min': self.parse_number(minPrice),
'max': self.parse_number(maxPrice),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
async def fetch_option_underlyings(self):
underlyingsResponse = await self.publicOptionsGetUnderlyings()
#
# [
# {
# "index_time": "1646915796",
# "name": "BTC_USDT",
# "index_price": "39142.73"
# }
# ]
#
underlyings = []
for i in range(0, len(underlyingsResponse)):
underlying = underlyingsResponse[i]
name = self.safe_string(underlying, 'name')
if name is not None:
underlyings.append(name)
return underlyings
def prepare_request(self, market=None, type=None, params={}):
"""
* @ignore
Fills request params contract, settle, currency_pair, market and account where applicable
:param dict market: CCXT market, required when type is None
:param str type: 'spot', 'swap', or 'future', required when market is None
:param dict params: request parameters
:returns: the api request object, and the new params object with non-needed parameters removed
"""
# * Do not call for multi spot order methods like cancelAllOrders and fetchOpenOrders. Use multiOrderSpotPrepareRequest instead
request = {}
if market is not None:
if market['contract']:
request['contract'] = market['id']
request['settle'] = market['settleId']
else:
request['currency_pair'] = market['id']
else:
swap = type == 'swap'
future = type == 'future'
if swap or future:
defaultSettle = 'usdt' if swap else 'btc'
settle = self.safe_string_lower(params, 'settle', defaultSettle)
params = self.omit(params, 'settle')
request['settle'] = settle
return [request, params]
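    # Resulting request shapes (illustrative):
    #
    #     spot market            -> {'currency_pair': 'BTC_USDT'}
    #     contract market        -> {'contract': 'BTC_USDT', 'settle': 'usdt'}
    #     type='swap', no market -> {'settle': 'usdt'}  # from params['settle'] or the per-type default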
def spot_order_prepare_request(self, market=None, stop=False, params={}):
"""
* @ignore
        Fills request params currency_pair, market and account where applicable for single-order spot methods like fetchOrder and cancelOrder
:param dict market: CCXT market
:param bool stop: True if for a stop order
:param dict params: request parameters
:returns: the api request object, and the new params object with non-needed parameters removed
"""
marginMode, query = self.get_margin_mode(stop, params)
request = {}
if not stop:
if market is None:
raise ArgumentsRequired(self.id + ' spotOrderPrepareRequest() requires a market argument for non-stop orders')
request['account'] = marginMode
request['currency_pair'] = market['id'] # Should always be set for non-stop
return [request, query]
def multi_order_spot_prepare_request(self, market=None, stop=False, params={}):
"""
* @ignore
Fills request params currency_pair, market and account where applicable for spot order methods like fetchOpenOrders, cancelAllOrders
:param dict market: CCXT market
:param bool stop: True if for a stop order
:param dict params: request parameters
:returns: the api request object, and the new params object with non-needed parameters removed
"""
marginMode, query = self.get_margin_mode(stop, params)
request = {
'account': marginMode,
}
if market is not None:
if stop:
                # gateio spot and margin stop orders use the term market instead of currency_pair, and normal instead of spot. Neither parameter is used when fetching/cancelling a single order. They are used for creating a single stop order, but createOrder does not call this method
request['market'] = market['id']
else:
request['currency_pair'] = market['id']
return [request, query]
def get_margin_mode(self, stop, params):
"""
* @ignore
        Gets the margin mode for this api call
:param bool stop: True if for a stop order
:param dict params: Request params
        :returns: The marginMode and the updated request params with marginMode removed; the marginMode value is the value expected by the "account" property specified in gateio's api docs
"""
defaultMarginMode = self.safe_string_lower_2(self.options, 'defaultMarginMode', 'marginMode', 'spot') # 'margin' is isolated margin on gateio's api
marginMode = self.safe_string_lower_2(params, 'marginMode', 'account', defaultMarginMode)
params = self.omit(params, ['marginMode', 'account'])
if marginMode == 'cross':
marginMode = 'cross_margin'
elif marginMode == 'isolated':
marginMode = 'margin'
elif marginMode == '':
marginMode = 'spot'
if stop:
if marginMode == 'spot':
# gateio spot stop orders use the term normal instead of spot
marginMode = 'normal'
if marginMode == 'cross_margin':
raise BadRequest(self.id + ' getMarginMode() does not support stop orders for cross margin')
return [marginMode, params]
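    # Mapping sketch from the unified marginMode to gateio's "account" value (illustrative):
    #
    #     'cross'          -> 'cross_margin'
    #     'isolated'       -> 'margin'
    #     '' or missing    -> 'spot'  # 'normal' when stop=True
    #     'cross' + stop   -> raises BadRequest (not supported)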
def get_settlement_currencies(self, type, method):
options = self.safe_value(self.options, type, {}) # ['BTC', 'USDT'] unified codes
fetchMarketsContractOptions = self.safe_value(options, method, {})
defaultSettle = ['usdt'] if (type == 'swap') else ['btc']
return self.safe_value(fetchMarketsContractOptions, 'settlementCurrencies', defaultSettle)
async def fetch_currencies(self, params={}):
# sandbox/testnet only supports future markets
apiBackup = self.safe_value(self.urls, 'apiBackup')
if apiBackup is not None:
return None
response = await self.publicSpotGetCurrencies(params)
#
# {
# "currency": "BCN",
# "delisted": False,
# "withdraw_disabled": True,
# "withdraw_delayed": False,
# "deposit_disabled": True,
# "trade_disabled": False
# }
#
result = {}
# TODO: remove magic constants
amountPrecision = self.parse_number('1e-6')
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
currencyIdLower = self.safe_string_lower(entry, 'currency')
code = self.safe_currency_code(currencyId)
delisted = self.safe_value(entry, 'delisted')
withdrawDisabled = self.safe_value(entry, 'withdraw_disabled', False)
depositDisabled = self.safe_value(entry, 'deposit_disabled', False)
tradeDisabled = self.safe_value(entry, 'trade_disabled', False)
withdrawEnabled = not withdrawDisabled
depositEnabled = not depositDisabled
tradeEnabled = not tradeDisabled
listed = not delisted
active = listed and tradeEnabled and withdrawEnabled and depositEnabled
result[code] = {
'id': currencyId,
'lowerCaseId': currencyIdLower,
'name': None,
'code': code,
'precision': amountPrecision,
'info': entry,
'active': active,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': None,
'fees': [],
'limits': self.limits,
}
return result
async def fetch_funding_rate(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
if not market['swap']:
raise BadSymbol(self.id + ' fetchFundingRate() supports swap contracts only')
request, query = self.prepare_request(market, None, params)
response = await self.publicFuturesGetSettleContractsContract(self.extend(request, query))
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
return self.parse_funding_rate(response)
async def fetch_funding_rates(self, symbols=None, params={}):
await self.load_markets()
request, query = self.prepare_request(None, 'swap', params)
response = await self.publicFuturesGetSettleContracts(self.extend(request, query))
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
result = self.parse_funding_rates(response)
return self.filter_by_array(result, 'symbol', symbols)
def parse_funding_rate(self, contract, market=None):
#
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
#
marketId = self.safe_string(contract, 'name')
symbol = self.safe_symbol(marketId, market)
markPrice = self.safe_number(contract, 'mark_price')
indexPrice = self.safe_number(contract, 'index_price')
interestRate = self.safe_number(contract, 'interest_rate')
fundingRate = self.safe_number(contract, 'funding_rate')
        fundingTime = self.safe_timestamp(contract, 'funding_next_apply')
fundingRateIndicative = self.safe_number(contract, 'funding_rate_indicative')
return {
'info': contract,
'symbol': symbol,
'markPrice': markPrice,
'indexPrice': indexPrice,
'interestRate': interestRate,
'estimatedSettlePrice': None,
'timestamp': None,
'datetime': None,
'fundingRate': fundingRate,
'fundingTimestamp': fundingTime,
'fundingDatetime': self.iso8601(fundingTime),
'nextFundingRate': fundingRateIndicative,
'nextFundingTimestamp': None,
'nextFundingDatetime': None,
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
async def fetch_network_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privateWalletGetDepositAddress(self.extend(request, params))
addresses = self.safe_value(response, 'multichain_addresses')
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
result = {}
for i in range(0, len(addresses)):
entry = addresses[i]
#
# {
# "chain": "ETH",
# "address": "0x359a697945E79C7e17b634675BD73B33324E9408",
# "payment_id": "",
# "payment_name": "",
# "obtain_failed": "0"
# }
#
obtainFailed = self.safe_integer(entry, 'obtain_failed')
if obtainFailed:
continue
network = self.safe_string(entry, 'chain')
address = self.safe_string(entry, 'address')
            tag = self.safe_string(entry, 'payment_id')
            tagLength = len(tag) if (tag is not None) else 0
            tag = tag if tagLength else None
result[network] = {
'info': entry,
'code': code,
'address': address,
'tag': tag,
}
return result
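    # The result is keyed by network id, e.g. (illustrative values):
    #
    #     {
    #         'ETH': {'code': 'USDT', 'address': '0x...', 'tag': None, 'info': {...}},
    #         'TRX': {'code': 'USDT', 'address': 'T...', 'tag': None, 'info': {...}},
    #     }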
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privateWalletGetDepositAddress(self.extend(request, params))
#
# {
# "currency": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d 391331007",
# "multichain_addresses": [
# {
# "chain": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d",
# "payment_id": "391331007",
# "payment_name": "Tag",
# "obtain_failed": 0
# }
# ]
# }
#
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
addressField = self.safe_string(response, 'address')
tag = None
address = None
if addressField.find(' ') >= 0:
splitted = addressField.split(' ')
address = splitted[0]
tag = splitted[1]
else:
address = addressField
return {
'info': response,
'code': code,
'address': address,
'tag': tag,
'network': None,
}
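    # Parsing sketch for the space-separated address field (from the sample above):
    #
    #     "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d 391331007"
    #         -> address: "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d", tag: "391331007"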
async def fetch_trading_fee(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currency_pair': market['id'],
}
response = await self.privateWalletGetFee(self.extend(request, params))
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
return self.parse_trading_fee(response, market)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateWalletGetFee(params)
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
return self.parse_trading_fees(response)
def parse_trading_fees(self, response):
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
market = self.market(symbol)
result[symbol] = self.parse_trading_fee(response, market)
return result
def parse_trading_fee(self, info, market=None):
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
contract = self.safe_value(market, 'contract')
takerKey = 'futures_taker_fee' if contract else 'taker_fee'
makerKey = 'futures_maker_fee' if contract else 'maker_fee'
return {
'info': info,
'symbol': self.safe_string(market, 'symbol'),
'maker': self.safe_number(info, makerKey),
'taker': self.safe_number(info, takerKey),
}
async def fetch_transaction_fees(self, codes=None, params={}):
await self.load_markets()
response = await self.privateWalletGetWithdrawStatus(params)
#
# {
# "currency": "MTN",
# "name": "Medicalchain",
# "name_cn": "Medicalchain",
# "deposit": "0",
# "withdraw_percent": "0%",
# "withdraw_fix": "900",
# "withdraw_day_limit": "500000",
# "withdraw_day_limit_remain": "500000",
# "withdraw_amount_mini": "900.1",
# "withdraw_eachtime_limit": "90000000000",
# "withdraw_fix_on_chains": {
# "ETH": "900"
# }
# }
#
withdrawFees = {}
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
withdrawFees[code] = {}
withdrawFix = self.safe_value(entry, 'withdraw_fix_on_chains')
if withdrawFix is None:
withdrawFix = {}
withdrawFix[code] = self.safe_number(entry, 'withdraw_fix')
keys = list(withdrawFix.keys())
            for j in range(0, len(keys)):
                key = keys[j]
withdrawFees[code][key] = self.parse_number(withdrawFix[key])
return {
'info': response,
'withdraw': withdrawFees,
'deposit': {},
}
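    # Shape of the returned structure for the sample above (illustrative): per-chain fees
    # are used when "withdraw_fix_on_chains" is present, otherwise the currency code itself
    # becomes the key with the flat "withdraw_fix" fee:
    #
    #     {
    #         'info': [...],
    #         'withdraw': {'MTN': {'ETH': 900.0}},
    #         'deposit': {},
    #     }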
async def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
# defaultType = 'future'
market = None
if symbol is not None:
market = self.market(symbol)
type, query = self.handle_market_type_and_params('fetchFundingHistory', market, params)
request, requestParams = self.prepare_request(market, type, query)
request['type'] = 'fund' # 'dnw' 'pnl' 'fee' 'refr' 'fund' 'point_dnw' 'point_fee' 'point_refr'
if since is not None:
            request['from'] = int(since / 1000)
if limit is not None:
request['limit'] = limit
method = self.get_supported_mapping(type, {
'swap': 'privateFuturesGetSettleAccountBook',
'future': 'privateDeliveryGetSettleAccountBook',
})
response = await getattr(self, method)(self.extend(request, requestParams))
#
# [
# {
# "time": 1646899200,
# "change": "-0.027722",
# "balance": "11.653120591841",
# "text": "XRP_USDT",
# "type": "fund"
# },
# ...
# ]
#
return self.parse_funding_histories(response, symbol, since, limit)
def parse_funding_histories(self, response, symbol, since, limit):
result = []
for i in range(0, len(response)):
entry = response[i]
funding = self.parse_funding_history(entry)
result.append(funding)
sorted = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
def parse_funding_history(self, info, market=None):
#
# {
# "time": 1646899200,
# "change": "-0.027722",
# "balance": "11.653120591841",
# "text": "XRP_USDT",
# "type": "fund"
# }
#
timestamp = self.safe_timestamp(info, 'time')
marketId = self.safe_string(info, 'text')
market = self.safe_market(marketId, market)
return {
'info': info,
'symbol': self.safe_string(market, 'symbol'),
'code': self.safe_string(market, 'settle'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': None,
'amount': self.safe_number(info, 'change'),
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
#
# request = {
# 'currency_pair': market['id'],
# 'interval': '0', # depth, 0 means no aggregation is applied, default to 0
# 'limit': limit, # maximum number of order depth data in asks or bids
# 'with_id': True, # return order book ID
# }
#
request, query = self.prepare_request(market, None, params)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetOrderBook',
'margin': 'publicSpotGetOrderBook',
'swap': 'publicFuturesGetSettleOrderBook',
'future': 'publicDeliveryGetSettleOrderBook',
})
if limit is not None:
request['limit'] = limit # default 10, max 100
request['with_id'] = True
response = await getattr(self, method)(self.extend(request, query))
#
# SPOT
#
# {
        #         "id": 6358770031,
# "current": 1634345973275,
# "update": 1634345973271,
# "asks": [
# ["2.2241","12449.827"],
# ["2.2242","200"],
# ["2.2244","826.931"],
# ["2.2248","3876.107"],
# ["2.225","2377.252"],
# ["2.22509","439.484"],
# ["2.2251","1489.313"],
# ["2.2253","714.582"],
# ["2.2254","1349.784"],
# ["2.2256","234.701"]],
# "bids": [
# ["2.2236","32.465"],
# ["2.2232","243.983"],
# ["2.2231","32.207"],
# ["2.223","449.827"],
# ["2.2228","7.918"],
# ["2.2227","12703.482"],
# ["2.2226","143.033"],
# ["2.2225","143.027"],
# ["2.2224","1369.352"],
# ["2.2223","756.063"]
# ]
# }
#
# Perpetual Swap
#
# {
        #         "id": 6358770031,
# "current": 1634350208.745,
# "asks": [
# {"s": 24909, "p": "61264.8"},
# {"s": 81, "p": "61266.6"},
# {"s": 2000, "p": "61267.6"},
# {"s": 490, "p": "61270.2"},
# {"s": 12, "p": "61270.4"},
# {"s": 11782, "p": "61273.2"},
# {"s": 14666, "p": "61273.3"},
# {"s": 22541, "p": "61273.4"},
# {"s": 33, "p": "61273.6"},
# {"s": 11980, "p": "61274.5"}
# ],
# "bids": [
# {"s": 41844, "p": "61264.7"},
# {"s": 13783, "p": "61263.3"},
# {"s": 1143, "p": "61259.8"},
# {"s": 81, "p": "61258.7"},
# {"s": 2471, "p": "61257.8"},
# {"s": 2471, "p": "61257.7"},
# {"s": 2471, "p": "61256.5"},
# {"s": 3, "p": "61254.2"},
# {"s": 114, "p": "61252.4"},
# {"s": 14372, "p": "61248.6"}
# ],
# "update": 1634350208.724
# }
#
timestamp = self.safe_integer(response, 'current')
if not market['spot']:
timestamp = timestamp * 1000
priceKey = 0 if market['spot'] else 'p'
amountKey = 1 if market['spot'] else 's'
nonce = self.safe_integer(response, 'id')
result = self.parse_order_book(response, symbol, timestamp, 'bids', 'asks', priceKey, amountKey)
result['nonce'] = nonce
return result
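    # Note the shape difference handled above (per the samples): spot books use
    # ["price", "amount"] arrays with millisecond timestamps, while contract books use
    # {"p": price, "s": size} dicts with second-resolution timestamps, hence the
    # priceKey/amountKey switch and the timestamp multiplication for non-spot markets.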
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request, query = self.prepare_request(market, None, params)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTickers',
'margin': 'publicSpotGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'future': 'publicDeliveryGetSettleTickers',
})
response = await getattr(self, method)(self.extend(request, query))
ticker = self.safe_value(response, 0)
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
#
# SPOT
#
# {
# "currency_pair": "KFC_USDT",
# "last": "7.255",
# "lowest_ask": "7.298",
# "highest_bid": "7.218",
# "change_percentage": "-1.18",
# "base_volume": "1219.053687865",
# "quote_volume": "8807.40299875455",
# "high_24h": "7.262",
# "low_24h": "7.095"
# }
#
# LINEAR/DELIVERY
#
# {
# "contract": "BTC_USDT",
# "last": "6432",
# "low_24h": "6278",
# "high_24h": "6790",
# "change_percentage": "4.43",
# "total_size": "32323904",
# "volume_24h": "184040233284",
# "volume_24h_btc": "28613220",
# "volume_24h_usd": "184040233284",
# "volume_24h_base": "28613220",
# "volume_24h_quote": "184040233284",
# "volume_24h_settle": "28613220",
# "mark_price": "6534",
# "funding_rate": "0.0001",
# "funding_rate_indicative": "0.0001",
# "index_price": "6531"
# }
#
marketId = self.safe_string_2(ticker, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
last = self.safe_string(ticker, 'last')
ask = self.safe_string(ticker, 'lowest_ask')
bid = self.safe_string(ticker, 'highest_bid')
high = self.safe_string(ticker, 'high_24h')
low = self.safe_string(ticker, 'low_24h')
baseVolume = self.safe_string_2(ticker, 'base_volume', 'volume_24h_base')
quoteVolume = self.safe_string_2(ticker, 'quote_volume', 'volume_24h_quote')
percentage = self.safe_string(ticker, 'change_percentage')
return self.safe_ticker({
'symbol': symbol,
'timestamp': None,
'datetime': None,
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
type, query = self.handle_market_type_and_params('fetchTickers', None, params)
request, requestParams = self.prepare_request(None, type, query)
method = self.get_supported_mapping(type, {
'spot': 'publicSpotGetTickers',
'margin': 'publicSpotGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'future': 'publicDeliveryGetSettleTickers',
})
response = await getattr(self, method)(self.extend(request, requestParams))
return self.parse_tickers(response, symbols)
def fetch_balance_helper(self, entry):
account = self.account()
account['used'] = self.safe_string_2(entry, 'freeze', 'locked')
account['free'] = self.safe_string(entry, 'available')
account['total'] = self.safe_string(entry, 'total')
return account
async def fetch_balance(self, params={}):
"""
:param dict params: exchange specific parameters
:param str params['type']: spot, margin, swap or future, if not provided self.options['defaultType'] is used
:param str params['settle']: 'btc' or 'usdt' - settle currency for perpetual swap and future - default="usdt" for swap and "btc" for future
:param str params['marginMode']: 'cross' or 'isolated' - marginMode for margin trading if not provided self.options['defaultMarginMode'] is used
:param str params['symbol']: margin only - unified ccxt symbol
"""
await self.load_markets()
symbol = self.safe_string(params, 'symbol')
params = self.omit(params, 'symbol')
type, query = self.handle_market_type_and_params('fetchBalance', None, params)
request, requestParams = self.prepare_request(None, type, query)
marginMode, requestQuery = self.get_margin_mode(False, requestParams)
if symbol is not None:
market = self.market(symbol)
request['currency_pair'] = market['id']
method = self.get_supported_mapping(type, {
'spot': self.get_supported_mapping(marginMode, {
'spot': 'privateSpotGetAccounts',
'margin': 'privateMarginGetAccounts',
'cross_margin': 'privateMarginGetCrossAccounts',
}),
'funding': 'privateMarginGetFundingAccounts',
'swap': 'privateFuturesGetSettleAccounts',
'future': 'privateDeliveryGetSettleAccounts',
})
response = await getattr(self, method)(self.extend(request, requestQuery))
contract = (type == 'swap' or type == 'future')
if contract:
response = [response]
#
# Spot / margin funding
#
# [
# {
# "currency": "DBC",
# "available": "0",
# "locked": "0"
# "lent": "0", # margin funding only
# "total_lent": "0" # margin funding only
# },
# ...
# ]
#
# Margin
#
# [
# {
# "currency_pair": "DOGE_USDT",
# "locked": False,
# "risk": "9999.99",
# "base": {
# "currency": "DOGE",
# "available": "0",
# "locked": "0",
# "borrowed": "0",
# "interest": "0"
# },
# "quote": {
# "currency": "USDT",
# "available": "0.73402",
# "locked": "0",
# "borrowed": "0",
# "interest": "0"
# }
# },
# ...
# ]
#
# Cross margin
#
# {
# "user_id": 10406147,
# "locked": False,
# "balances": {
# "USDT": {
# "available": "1",
# "freeze": "0",
# "borrowed": "0",
# "interest": "0"
# }
# },
# "total": "1",
# "borrowed": "0",
# "interest": "0",
# "risk": "9999.99"
# }
#
# Perpetual Swap
#
# {
# order_margin: "0",
# point: "0",
# bonus: "0",
# history: {
# dnw: "2.1321",
# pnl: "11.5351",
# refr: "0",
# point_fee: "0",
# fund: "-0.32340576684",
# bonus_dnw: "0",
# point_refr: "0",
# bonus_offset: "0",
# fee: "-0.20132775",
# point_dnw: "0",
# },
# unrealised_pnl: "13.315100000006",
# total: "12.51345151332",
# available: "0",
# in_dual_mode: False,
# currency: "USDT",
# position_margin: "12.51345151332",
# user: "6333333",
# }
#
# Delivery Future
#
# {
# order_margin: "0",
# point: "0",
# history: {
# dnw: "1",
# pnl: "0",
# refr: "0",
# point_fee: "0",
# point_dnw: "0",
# settle: "0",
# settle_fee: "0",
# point_refr: "0",
# fee: "0",
# },
# unrealised_pnl: "0",
# total: "1",
# available: "1",
# currency: "USDT",
# position_margin: "0",
# user: "6333333",
# }
#
result = {
'info': response,
}
crossMargin = marginMode == 'cross_margin'
margin = marginMode == 'margin'
data = response
if 'balances' in data: # True for cross_margin
flatBalances = []
balances = self.safe_value(data, 'balances', [])
# inject currency and create an artificial balance object
# so it can follow the existent flow
keys = list(balances.keys())
for i in range(0, len(keys)):
currencyId = keys[i]
content = balances[currencyId]
content['currency'] = currencyId
flatBalances.append(content)
data = flatBalances
for i in range(0, len(data)):
entry = data[i]
if margin and not crossMargin:
marketId = self.safe_string(entry, 'currency_pair')
symbol = self.safe_symbol(marketId, None, '_')
base = self.safe_value(entry, 'base', {})
quote = self.safe_value(entry, 'quote', {})
                baseCode = self.safe_currency_code(self.safe_string(base, 'currency'))
                quoteCode = self.safe_currency_code(self.safe_string(quote, 'currency'))
subResult = {}
subResult[baseCode] = self.fetch_balance_helper(base)
subResult[quoteCode] = self.fetch_balance_helper(quote)
result[symbol] = self.safe_balance(subResult)
else:
                code = self.safe_currency_code(self.safe_string(entry, 'currency'))
result[code] = self.fetch_balance_helper(entry)
return result if (margin and not crossMargin) else self.safe_balance(result)
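    # For isolated margin the result is keyed by symbol rather than flattened per currency
    # (illustrative):
    #
    #     {
    #         'info': [...],
    #         'DOGE/USDT': {'DOGE': {...}, 'USDT': {...}, ...},
    #     }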
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
price = self.safe_string(params, 'price')
request = {}
request, params = self.prepare_request(market, None, params)
request['interval'] = self.timeframes[timeframe]
method = 'publicSpotGetCandlesticks'
if market['contract']:
maxLimit = 1999
limit = maxLimit if (limit is None) else min(limit, maxLimit)
if market['future']:
method = 'publicDeliveryGetSettleCandlesticks'
elif market['swap']:
method = 'publicFuturesGetSettleCandlesticks'
isMark = (price == 'mark')
isIndex = (price == 'index')
if isMark or isIndex:
request['contract'] = price + '_' + market['id']
params = self.omit(params, 'price')
else:
maxLimit = 1000
limit = maxLimit if (limit is None) else min(limit, maxLimit)
request['limit'] = limit
if since is not None:
duration = self.parse_timeframe(timeframe)
request['from'] = int(since / 1000)
toTimestamp = self.sum(request['from'], limit * duration - 1)
currentTimestamp = self.seconds()
request['to'] = min(toTimestamp, currentTimestamp)
response = await getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
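    # For example, requesting mark-price candles for a swap (illustrative, equivalent to
    # what fetch_mark_ohlcv below does):
    #
    #     await exchange.fetch_ohlcv('BTC/USDT:USDT', '1h', params={'price': 'mark'})
    #     # -> the request uses contract 'mark_BTC_USDT' via publicFuturesGetSettleCandlesticks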
async def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'mark',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
async def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
if not market['swap']:
raise BadSymbol(self.id + ' fetchFundingRateHistory() supports swap contracts only')
request, query = self.prepare_request(market, None, params)
if limit is not None:
request['limit'] = limit
method = 'publicFuturesGetSettleFundingRate'
response = await getattr(self, method)(self.extend(request, query))
#
# {
# "r": "0.00063521",
# "t": "1621267200000",
# }
#
rates = []
for i in range(0, len(response)):
entry = response[i]
timestamp = self.safe_timestamp(entry, 't')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'r'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, market['symbol'], since, limit)
async def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'index',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_ohlcv(self, ohlcv, market=None):
#
# Spot market candles
#
# [
# "1626163200", # Unix timestamp in seconds
# "346711.933138181617", # Trading volume
# "33165.23", # Close price
# "33260", # Highest price
# "33117.6", # Lowest price
# "33184.47" # Open price
# ]
#
# Mark and Index price candles
#
# {
# "t":1632873600, # Unix timestamp in seconds
# "o": "41025", # Open price
# "h": "41882.17", # Highest price
# "c": "41776.92", # Close price
# "l": "40783.94" # Lowest price
# }
#
if isinstance(ohlcv, list):
return [
self.safe_timestamp(ohlcv, 0), # unix timestamp in seconds
self.safe_number(ohlcv, 5), # open price
self.safe_number(ohlcv, 3), # highest price
self.safe_number(ohlcv, 4), # lowest price
self.safe_number(ohlcv, 2), # close price
self.safe_number(ohlcv, 1), # trading volume
]
else:
# Mark and Index price candles
return [
self.safe_timestamp(ohlcv, 't'), # unix timestamp in seconds
self.safe_number(ohlcv, 'o'), # open price
self.safe_number(ohlcv, 'h'), # highest price
self.safe_number(ohlcv, 'l'), # lowest price
self.safe_number(ohlcv, 'c'), # close price
self.safe_number(ohlcv, 'v'), # trading volume, None for mark or index price
]
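    # Worked example (spot candle from the sample above):
    #
    #     ["1626163200", "346711.933138181617", "33165.23", "33260", "33117.6", "33184.47"]
    #     -> [1626163200000, 33184.47, 33260.0, 33117.6, 33165.23, 346711.933138181617]
    #        [timestamp-ms,  open,     high,    low,     close,    volume]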
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
#
# spot
#
# request = {
# 'currency_pair': market['id'],
# 'limit': limit, # maximum number of records to be returned in a single list
        #     'last_id': 'id',  # specify list starting point using the id of last record in previous list-query results
# 'reverse': False, # True to retrieve records where id is smaller than the specified last_id, False to retrieve records where id is larger than the specified last_id
# }
#
# swap, future
#
# request = {
# 'settle': market['settleId'],
# 'contract': market['id'],
# 'limit': limit, # maximum number of records to be returned in a single list
        #     'last_id': 'id',  # specify list starting point using the id of last record in previous list-query results
        #     'from': int(since / 1000),  # starting time in seconds, if not specified, to and limit will be used to limit response items
# 'to': self.seconds(), # end time in seconds, default to current time
# }
#
request, query = self.prepare_request(market, None, params)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTrades',
'margin': 'publicSpotGetTrades',
'swap': 'publicFuturesGetSettleTrades',
'future': 'publicDeliveryGetSettleTrades',
})
if limit is not None:
request['limit'] = limit # default 100, max 1000
if since is not None and (market['contract']):
request['from'] = int(since / 1000)
response = await getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# id: "1852958144",
# create_time: "1634673259",
# create_time_ms: "1634673259378.105000",
# currency_pair: "ADA_USDT",
# side: "sell",
# amount: "307.078",
# price: "2.104",
# }
# ]
#
# perpetual swap
#
# [
# {
# size: "2",
# id: "2522911",
# create_time_ms: "1634673380.182",
# create_time: "1634673380.182",
# contract: "ADA_USDT",
# price: "2.10486",
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
Fetch personal trading history
:param str symbol: The symbol for the market to fetch trades for
:param int since: The earliest timestamp, in ms, that fetched trades were made
:param int limit: The max number of trades to fetch
:param dict params: Exchange specific parameters
:param str params['marginMode']: 'cross' or 'isolated' - marginMode for margin trading if not provided self.options['defaultMarginMode'] is used
        :param str params['type']: 'spot', 'swap', or 'future', if not provided self.options['defaultType'] is used
        :param int params['till']: The latest timestamp, in ms, that fetched trades were made
        :param int params['page']: *spot only* Page number
        :param str params['order_id']: *spot only* Filter trades with specified order ID. symbol is also required if this field is present
        :param str params['order']: *contract only* Futures order ID, return related data only if specified
        :param int params['offset']: *contract only* list offset, starting from 0
        :param str params['last_id']: *contract only* specify list starting point using the id of last record in previous list-query results
        :param int params['count_total']: *contract only* whether to return total number matched, default to 0 (no return)
        :returns: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
await self.load_markets()
type = None
marginMode = None
request = {}
market = self.market(symbol) if (symbol is not None) else None
till = self.safe_number(params, 'till')
params = self.omit(params, 'till')
type, params = self.handle_market_type_and_params('fetchMyTrades', market, params)
contract = (type == 'swap') or (type == 'future')
if contract:
request, params = self.prepare_request(market, type, params)
else:
if market is not None:
request['currency_pair'] = market['id'] # Should always be set for non-stop
marginMode, params = self.get_margin_mode(False, params)
request['account'] = marginMode
if limit is not None:
request['limit'] = limit # default 100, max 1000
if since is not None:
request['from'] = int(since / 1000)
if till is not None:
request['to'] = int(till / 1000)
method = self.get_supported_mapping(type, {
'spot': 'privateSpotGetMyTrades',
'margin': 'privateSpotGetMyTrades',
'swap': 'privateFuturesGetSettleMyTrades',
'future': 'privateDeliveryGetSettleMyTrades',
})
response = await getattr(self, method)(self.extend(request, params))
#
# spot
#
# [
# {
# "id": "2876130500",
# "create_time": "1645464610",
# "create_time_ms": "1645464610777.399200",
# "currency_pair": "DOGE_USDT",
# "side": "sell",
# "role": "taker",
# "amount": "10.97",
# "price": "0.137384",
# "order_id": "125924049993",
# "fee": "0.00301420496",
# "fee_currency": "USDT",
# "point_fee": "0",
# "gt_fee": "0"
# }
# ]
#
# perpetual swap
#
# [
# {
# "size": -5,
# "order_id": "130264979823",
# "id": 26884791,
# "role": "taker",
# "create_time": 1645465199.5472,
# "contract": "DOGE_USDT",
# "price": "0.136888"
# }
# ]
#
# future
#
# [
# {
# "id": 121234231,
# "create_time": 1514764800.123,
# "contract": "BTC_USDT",
# "order_id": "21893289839",
# "size": 100,
# "price": "100.123",
# "role": "taker"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# public
#
# {
# "id": "1334253759",
# "create_time": "1626342738",
# "create_time_ms": "1626342738331.497000",
# "currency_pair": "BTC_USDT",
# "side": "sell",
# "amount": "0.0022",
# "price": "32452.16"
# }
#
# public ws
#
# {
# id: 221994511,
# time: 1580311438.618647,
# price: '9309',
# amount: '0.0019',
# type: 'sell'
# }
#
# spot rest
#
# {
# "id": "2876130500",
# "create_time": "1645464610",
# "create_time_ms": "1645464610777.399200",
# "currency_pair": "DOGE_USDT",
# "side": "sell",
# "role": "taker",
# "amount": "10.97",
# "price": "0.137384",
# "order_id": "125924049993",
# "fee": "0.00301420496",
# "fee_currency": "USDT",
# "point_fee": "0","gt_fee":"0"
# }
#
# perpetual swap rest
#
# {
# "size": -5,
# "order_id": "130264979823",
# "id": 26884791,
# "role": "taker",
# "create_time": 1645465199.5472,
# "contract": "DOGE_USDT",
# "price": "0.136888"
# }
#
# future rest
#
# {
# "id": 121234231,
# "create_time": 1514764800.123,
# "contract": "BTC_USDT",
# "order_id": "21893289839",
# "size": 100,
# "price": "100.123",
# "role": "taker"
# }
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_timestamp_2(trade, 'time', 'create_time')
timestamp = self.safe_integer(trade, 'create_time_ms', timestamp)
marketId = self.safe_string_2(trade, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
amountString = self.safe_string_2(trade, 'amount', 'size')
priceString = self.safe_string(trade, 'price')
contractSide = 'sell' if Precise.string_lt(amountString, '0') else 'buy'
amountString = Precise.string_abs(amountString)
side = self.safe_string_2(trade, 'side', 'type', contractSide)
orderId = self.safe_string(trade, 'order_id')
gtFee = self.safe_string(trade, 'gt_fee')
feeCurrency = None
feeCostString = None
if gtFee == '0':
feeCurrency = self.safe_string(trade, 'fee_currency')
feeCostString = self.safe_string(trade, 'fee')
else:
feeCurrency = 'GT'
feeCostString = gtFee
fee = {
'cost': feeCostString,
'currency': feeCurrency,
}
takerOrMaker = self.safe_string(trade, 'role')
return self.safe_trade({
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, 30 * 24 * 60 * 60)
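            # the endpoint expects a bounded time window, so when 'since' is given we query the 30 days that follow it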
response = await self.privateWalletGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, 30 * 24 * 60 * 60)
response = await self.privateWalletGetWithdrawals(self.extend(request, params))
return self.parse_transactions(response, currency)
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'address': address,
'amount': self.currency_to_precision(code, amount),
}
if tag is not None:
request['memo'] = tag
networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper(params, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
request['chain'] = network
params = self.omit(params, 'network')
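        # note(illustrative): e.g. withdraw('USDT', 50, address, params={'network': 'ERC20'}) resolves the
        # alias through self.options['networks'] and sends the result in the request's 'chain' field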
response = await self.privateWithdrawalsPost(self.extend(request, params))
#
# {
# "id": "w13389675",
# "currency": "USDT",
# "amount": "50",
# "address": "TUu2rLFrmzUodiWfYki7QCNtv1akL682p1",
# "memo": null
# }
#
return self.parse_transaction(response, currency)
def parse_transaction_status(self, status):
statuses = {
'PEND': 'pending',
'REQUEST': 'pending',
'DMOVE': 'pending',
'CANCEL': 'failed',
'DONE': 'ok',
'BCODE': 'ok', # GateCode withdrawal
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'd': 'deposit',
'w': 'withdrawal',
}
return self.safe_string(types, type, type)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# {
# "id": "d33361395",
# "currency": "USDT_TRX",
# "address": "TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z",
# "amount": "100",
# "txid": "ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0",
# "timestamp": "1626345819",
# "status": "DONE",
# "memo": ""
# }
#
# withdraw
#
# {
# "id": "w13389675",
# "currency": "USDT",
# "amount": "50",
# "address": "TUu2rLFrmzUodiWfYki7QCNtv1akL682p1",
# "memo": null
# }
#
id = self.safe_string(transaction, 'id')
type = None
amount = self.safe_string(transaction, 'amount')
        if id is not None:
            if id[0] == 'b':
                # GateCode handling
                type = 'deposit' if Precise.string_gt(amount, '0') else 'withdrawal'
                amount = Precise.string_abs(amount)
            else:
                type = self.parse_transaction_type(id[0])
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
txid = self.safe_string(transaction, 'txid')
rawStatus = self.safe_string(transaction, 'status')
status = self.parse_transaction_status(rawStatus)
address = self.safe_string(transaction, 'address')
fee = self.safe_number(transaction, 'fee')
tag = self.safe_string(transaction, 'memo')
if tag == '':
tag = None
timestamp = self.safe_timestamp(transaction, 'timestamp')
return {
'info': transaction,
'id': id,
'txid': txid,
'currency': code,
'amount': self.parse_number(amount),
'network': None,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'status': status,
'type': type,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'updated': None,
'fee': fee,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
Create an order on the exchange
:param str symbol: Unified CCXT market symbol
:param str type: "limit" or "market" *"market" is contract only*
:param str side: "buy" or "sell"
:param float amount: the amount of currency to trade
        :param float price: *ignored in "market" orders* the price at which the order is to be fulfilled, in units of the quote currency
:param dict params: Extra parameters specific to the exchange API endpoint
:param float params['stopPrice']: The price at which a trigger order is triggered at
:param str params['timeInForce']: "GTC", "IOC", or "PO"
        :param str params['marginMode']: 'cross' or 'isolated' - marginMode for margin trading; if not provided, self.options['defaultMarginMode'] is used
:param int params['iceberg']: Amount to display for the iceberg order, Null or 0 for normal orders, Set to -1 to hide the order completely
:param str params['text']: User defined information
:param str params['account']: *spot and margin only* "spot", "margin" or "cross_margin"
:param bool params['auto_borrow']: *margin only* Used in margin or cross margin trading to allow automatic loan of insufficient amount if balance is not enough
:param str params['settle']: *contract only* Unified Currency Code for settle currency
        :param bool params['reduceOnly']: *contract only* Indicates if this order is to reduce the size of a position
        :param bool params['close']: *contract only* Set as True to close the position, with size set to 0
        :param str params['auto_size']: *contract only* Set side to close dual-mode position, close_long closes the long side, while close_short closes the short one, size also needs to be set to 0
:returns: `An order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
contract = market['contract']
stopPrice = self.safe_number(params, 'stopPrice')
methodTail = 'Orders'
reduceOnly = self.safe_value_2(params, 'reduce_only', 'reduceOnly')
defaultTimeInForce = self.safe_value_2(params, 'tif', 'time_in_force', 'gtc')
timeInForce = self.safe_value(params, 'timeInForce', defaultTimeInForce)
postOnly = False
type, postOnly, timeInForce, params = self.is_post_only(type, timeInForce, None, params)
params = self.omit(params, ['stopPrice', 'reduce_only', 'reduceOnly', 'tif', 'time_in_force', 'timeInForce'])
if postOnly:
timeInForce = 'poc'
isLimitOrder = (type == 'limit')
isMarketOrder = (type == 'market')
if isLimitOrder and price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for ' + type + ' orders')
if contract:
amountToPrecision = self.amount_to_precision(symbol, amount)
signedAmount = Precise.string_neg(amountToPrecision) if (side == 'sell') else amountToPrecision
amount = int(signedAmount)
if isMarketOrder:
timeInForce = 'ioc'
price = 0
elif not isLimitOrder:
# Gateio doesn't have market orders for spot
raise InvalidOrder(self.id + ' createOrder() does not support ' + type + ' orders for ' + market['type'] + ' markets')
request = None
trigger = self.safe_value(params, 'trigger')
if stopPrice is None and trigger is None:
if contract:
# contract order
request = {
'contract': market['id'], # filled in prepareRequest above
'size': amount, # int64, positive = bid, negative = ask
# 'iceberg': 0, # int64, display size for iceberg order, 0 for non-iceberg, note that you will have to pay the taker fee for the hidden size
'price': self.price_to_precision(symbol, price), # 0 for market order with tif set as ioc
# 'close': False, # True to close the position, with size set to 0
                    # 'reduce_only': False, # set as True to be a reduce-only order
# 'tif': 'gtc', # gtc, ioc, poc PendingOrCancelled == postOnly order
# 'text': clientOrderId, # 't-abcdef1234567890',
# 'auto_size': '', # close_long, close_short, note size also needs to be set to 0
'settle': market['settleId'], # filled in prepareRequest above
}
if reduceOnly is not None:
request['reduce_only'] = reduceOnly
if timeInForce is not None:
request['tif'] = timeInForce
else:
marginMode = None
marginMode, params = self.get_margin_mode(False, params)
# spot order
request = {
# 'text': clientOrderId, # 't-abcdef1234567890',
'currency_pair': market['id'], # filled in prepareRequest above
'type': type,
'account': marginMode, # 'spot', 'margin', 'cross_margin'
'side': side,
'amount': self.amount_to_precision(symbol, amount),
'price': self.price_to_precision(symbol, price),
# 'time_in_force': 'gtc', # gtc, ioc, poc PendingOrCancelled == postOnly order
# 'iceberg': 0, # amount to display for the iceberg order, null or 0 for normal orders, set to -1 to hide the order completely
# 'auto_borrow': False, # used in margin or cross margin trading to allow automatic loan of insufficient amount if balance is not enough
                    # 'auto_repay': False, # automatic repayment for automatic borrow loan generated by cross margin order, disabled by default
}
if timeInForce is not None:
request['time_in_force'] = timeInForce
clientOrderId = self.safe_string_2(params, 'text', 'clientOrderId')
if clientOrderId is not None:
# user-defined, must follow the rules if not empty
# prefixed with t-
# no longer than 28 bytes without t- prefix
# can only include 0-9, A-Z, a-z, underscores(_), hyphens(-) or dots(.)
if len(clientOrderId) > 28:
raise BadRequest(self.id + ' createOrder() clientOrderId or text param must be up to 28 characters')
params = self.omit(params, ['text', 'clientOrderId'])
if clientOrderId[0] != 't':
clientOrderId = 't-' + clientOrderId
request['text'] = clientOrderId
else:
if contract:
# contract conditional order
rule = 1 if (side == 'buy') else 2
request = {
'initial': {
'contract': market['id'],
'size': amount, # positive = buy, negative = sell, set to 0 to close the position
'price': self.price_to_precision(symbol, price), # set to 0 to use market price
# 'close': False, # set to True if trying to close the position
# 'tif': 'gtc', # gtc, ioc, if using market price, only ioc is supported
# 'text': clientOrderId, # web, api, app
# 'reduce_only': False,
},
'trigger': {
# 'strategy_type': 0, # 0 = by price, 1 = by price gap, only 0 is supported currently
# 'price_type': 0, # 0 latest deal price, 1 mark price, 2 index price
'price': self.price_to_precision(symbol, stopPrice), # price or gap
'rule': rule, # 1 means price_type >= price, 2 means price_type <= price
# 'expiration': expiration, how many seconds to wait for the condition to be triggered before cancelling the order
},
'settle': market['settleId'],
}
expiration = self.safe_integer(params, 'expiration')
if expiration is not None:
request['trigger']['expiration'] = expiration
params = self.omit(params, 'expiration')
if reduceOnly is not None:
request['initial']['reduce_only'] = reduceOnly
if timeInForce is not None:
request['initial']['tif'] = timeInForce
else:
# spot conditional order
options = self.safe_value(self.options, 'createOrder', {})
marginMode = None
marginMode, params = self.get_margin_mode(True, params)
defaultExpiration = self.safe_integer(options, 'expiration')
expiration = self.safe_integer(params, 'expiration', defaultExpiration)
rule = '>=' if (side == 'buy') else '<='
triggerPrice = self.safe_value(trigger, 'price', stopPrice)
request = {
'trigger': {
'price': self.price_to_precision(symbol, triggerPrice),
'rule': rule, # >= triggered when market price larger than or equal to price field, <= triggered when market price less than or equal to price field
'expiration': expiration, # required, how long(in seconds) to wait for the condition to be triggered before cancelling the order
},
'put': {
'type': type,
'side': side,
'price': self.price_to_precision(symbol, price),
'amount': self.amount_to_precision(symbol, amount),
'account': marginMode,
'time_in_force': timeInForce, # gtc, ioc for taker only
},
'market': market['id'],
}
methodTail = 'PriceOrders'
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotPost' + methodTail,
'margin': 'privateSpotPost' + methodTail,
'swap': 'privateFuturesPostSettle' + methodTail,
'future': 'privateDeliveryPostSettle' + methodTail,
})
response = await getattr(self, method)(self.deep_extend(request, params))
#
# spot
#
# {
# "id": "95282841887",
# "text": "apiv4",
# "create_time": "1637383156",
# "update_time": "1637383156",
# "create_time_ms": 1637383156017,
# "update_time_ms": 1637383156017,
# "status": "open",
# "currency_pair": "ETH_USDT",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "0.01",
# "price": "3500",
# "time_in_force": "gtc",
# "iceberg": "0",
# "left": "0.01",
# "fill_price": "0",
# "filled_total": "0",
# "fee": "0",
# "fee_currency": "ETH",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee": "0",
# "rebated_fee_currency": "USDT"
# }
#
# spot conditional
#
# {"id": 5891843}
#
# future and perpetual swaps
#
# {
# "id": 95938572327,
# "contract": "ETH_USDT",
# "mkfr": "0",
# "tkfr": "0.0005",
# "tif": "gtc",
# "is_reduce_only": False,
# "create_time": 1637384600.08,
# "price": "3000",
# "size": 1,
# "refr": "0",
# "left": 1,
# "text": "api",
# "fill_price": "0",
# "user": 2436035,
# "status": "open",
# "is_liq": False,
# "refu": 0,
# "is_close": False,
# "iceberg": 0
# }
#
# futures and perpetual swaps conditionals
#
# {"id": 7615567}
#
return self.parse_order(response, market)
def parse_order_status(self, status):
statuses = {
'_new': 'open',
'filled': 'closed',
'cancelled': 'canceled',
'liquidated': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# SPOT
# createOrder/cancelOrder/fetchOrder
#
# {
# "id": "62364648575",
# "text": "apiv4",
# "create_time": "1626354834",
# "update_time": "1626354834",
# "create_time_ms": "1626354833544",
# "update_time_ms": "1626354833544",
# "status": "open",
# "currency_pair": "BTC_USDT",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "0.0001",
# "price": "30000",
# "time_in_force": "gtc",
# "iceberg": "0",
# "left": "0.0001",
# "fill_price": "0",
# "filled_total": "0",
# "fee": "0",
# "fee_currency": "BTC",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": True,
# "rebated_fee": "0",
# "rebated_fee_currency": "USDT"
# }
#
# SPOT TRIGGER ORDERS
# createOrder
#
# {
# "id": 12604556
# }
#
# fetchOrder/cancelOrder
#
# {
# "market": "ADA_USDT",
# "user": 6392049,
# "trigger": {
# "price": "1.08", # stopPrice
# "rule": "\u003e=",
# "expiration": 86400
# },
# "put": {
# "type": "limit",
# "side": "buy",
# "price": "1.08", # order price
# "amount": "1.00000000000000000000",
# "account": "normal",
# "time_in_force": "gtc"
# },
# "id": 71639298,
# "ctime": 1643945985,
# "status": "open"
# }
#
# FUTURE AND SWAP
# createOrder/cancelOrder/fetchOrder
#
# {
# "id": 123028481731,
# "contract": "ADA_USDT",
# "mkfr": "-0.00005",
# "tkfr": "0.00048",
# "tif": "ioc",
# "is_reduce_only": False,
# "create_time": 1643950262.68,
# "finish_time": 1643950262.68,
# "price": "0",
# "size": 1,
# "refr": "0",
# "left":0,
# "text": "api",
# "fill_price": "1.05273",
# "user":6329238,
# "finish_as": "filled",
# "status": "finished",
# "is_liq": False,
# "refu":0,
# "is_close": False,
# "iceberg": 0
# }
#
# TRIGGER ORDERS(FUTURE AND SWAP)
# createOrder
#
# {
# "id": 12604556
# }
#
# fetchOrder/cancelOrder
#
# {
# "user": 6320300,
# "trigger": {
# "strategy_type": 0,
# "price_type": 0,
# "price": "1.03", # stopPrice
# "rule": 2,
# "expiration": 0
# },
# "initial": {
# "contract": "ADA_USDT",
# "size": -1,
# "price": "1.02",
# "tif": "gtc",
# "text": "",
# "iceberg": 0,
# "is_close": False,
# "is_reduce_only": False,
# "auto_size": ""
# },
# "id": 126393906,
# "trade_id": 0,
# "status": "open",
# "reason": "",
# "create_time": 1643953482,
# "finish_time": 1643953482,
# "is_stop_order": False,
# "stop_trigger": {
# "rule": 0,
# "trigger_price": "",
# "order_price": ""
# },
# "me_order_id": 0,
# "order_type": ""
# }
#
put = self.safe_value_2(order, 'put', 'initial')
trigger = self.safe_value(order, 'trigger')
contract = self.safe_string(put, 'contract')
type = self.safe_string(put, 'type')
timeInForce = self.safe_string_upper_2(put, 'time_in_force', 'tif')
amount = self.safe_string_2(put, 'amount', 'size')
side = self.safe_string(put, 'side')
price = self.safe_string(put, 'price')
contract = self.safe_string(order, 'contract', contract)
type = self.safe_string(order, 'type', type)
timeInForce = self.safe_string_upper_2(order, 'time_in_force', 'tif', timeInForce)
if timeInForce == 'POC':
timeInForce = 'PO'
postOnly = (timeInForce == 'PO')
amount = self.safe_string_2(order, 'amount', 'size', amount)
side = self.safe_string(order, 'side', side)
price = self.safe_string(order, 'price', price)
remaining = self.safe_string(order, 'left')
filled = Precise.string_sub(amount, remaining)
cost = self.safe_string(order, 'filled_total')
rawStatus = None
average = None
if put:
remaining = amount
filled = '0'
cost = '0'
if contract:
isMarketOrder = Precise.string_equals(price, '0') and (timeInForce == 'IOC')
type = 'market' if isMarketOrder else 'limit'
side = 'buy' if Precise.string_gt(amount, '0') else 'sell'
rawStatus = self.safe_string(order, 'finish_as', 'open')
average = self.safe_number(order, 'fill_price')
else:
rawStatus = self.safe_string(order, 'status')
timestamp = self.safe_integer(order, 'create_time_ms')
if timestamp is None:
timestamp = self.safe_timestamp_2(order, 'create_time', 'ctime')
lastTradeTimestamp = self.safe_integer(order, 'update_time_ms')
if lastTradeTimestamp is None:
lastTradeTimestamp = self.safe_timestamp_2(order, 'update_time', 'finish_time')
exchangeSymbol = self.safe_string_2(order, 'currency_pair', 'market', contract)
        # Everything below this point(above the return) is related to fees
fees = []
gtFee = self.safe_string(order, 'gt_fee')
if gtFee:
fees.append({
'currency': 'GT',
'cost': gtFee,
})
fee = self.safe_string(order, 'fee')
if fee:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'fee_currency')),
'cost': fee,
})
rebate = self.safe_string(order, 'rebated_fee')
if rebate:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'rebated_fee_currency')),
'cost': Precise.string_neg(rebate),
})
numFeeCurrencies = len(fees)
multipleFeeCurrencies = numFeeCurrencies > 1
status = self.parse_order_status(rawStatus)
return self.safe_order({
'id': self.safe_string(order, 'id'),
'clientOrderId': self.safe_string(order, 'text'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'status': status,
'symbol': self.safe_symbol(exchangeSymbol),
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': self.parse_number(price),
'stopPrice': self.safe_number(trigger, 'price'),
'average': average,
'amount': self.parse_number(Precise.string_abs(amount)),
'cost': Precise.string_abs(cost),
'filled': self.parse_number(Precise.string_abs(filled)),
'remaining': self.parse_number(Precise.string_abs(remaining)),
'fee': None if multipleFeeCurrencies else self.safe_value(fees, 0),
'fees': fees if multipleFeeCurrencies else [],
'trades': None,
'info': order,
}, market)
async def create_reduce_only_order(self, symbol, type, side, amount, price=None, params={}):
request = {
'reduceOnly': True,
}
return await self.create_order(symbol, type, side, amount, price, self.extend(request, params))
async def fetch_order(self, id, symbol=None, params={}):
"""
Retrieves information on an order
:param str id: Order id
:param str symbol: Unified market symbol, *required for spot and margin*
:param dict params: Parameters specified by the exchange api
:param bool params['stop']: True if the order being fetched is a trigger order
        :param str params['marginMode']: 'cross' or 'isolated' - marginMode for margin trading; if not provided, self.options['defaultMarginMode'] is used
        :param str params['type']: 'spot', 'swap', or 'future'; if not provided, self.options['defaultType'] is used
:param str params['settle']: 'btc' or 'usdt' - settle currency for perpetual swap and future - market settle currency is used if symbol is not None, default="usdt" for swap and "btc" for future
:returns: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
stop = self.safe_value_2(params, 'is_stop_order', 'stop', False)
params = self.omit(params, ['is_stop_order', 'stop'])
clientOrderId = self.safe_string_2(params, 'text', 'clientOrderId')
orderId = id
if clientOrderId is not None:
params = self.omit(params, ['text', 'clientOrderId'])
if clientOrderId[0] != 't':
clientOrderId = 't-' + clientOrderId
orderId = clientOrderId
market = None if (symbol is None) else self.market(symbol)
type, query = self.handle_market_type_and_params('fetchOrder', market, params)
contract = (type == 'swap') or (type == 'future')
request, requestParams = self.prepare_request(market, type, query) if contract else self.spot_order_prepare_request(market, stop, query)
request['order_id'] = orderId
methodMiddle = 'PriceOrders' if stop else 'Orders'
method = self.get_supported_mapping(type, {
'spot': 'privateSpotGet' + methodMiddle + 'OrderId',
'margin': 'privateSpotGet' + methodMiddle + 'OrderId',
'swap': 'privateFuturesGetSettle' + methodMiddle + 'OrderId',
'future': 'privateDeliveryGetSettle' + methodMiddle + 'OrderId',
})
response = await getattr(self, method)(self.extend(request, requestParams))
return self.parse_order(response, market)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches all open orders
:param str symbol: Unified market symbol
:param int since: earliest time in ms for orders in the response
:param int limit: max number of order structures to return
:param dict params: exchange specific params
:param bool params['stop']: True for fetching stop orders
        :param str params['type']: spot, margin, swap or future; if not provided, self.options['defaultType'] is used
        :param str params['marginMode']: 'cross' or 'isolated' - marginMode for type='margin'; if not provided, self.options['defaultMarginMode'] is used
:returns: An array of order structures
"""
return await self.fetch_orders_by_status('open', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches all closed orders
:param str symbol: Unified market symbol of the market to fetch orders for
:param int since: earliest time in ms for orders in the response
:param int limit: max number of order structures to return
:param dict params: exchange specific params
:param bool params['stop']: True for fetching stop orders
        :param str params['type']: spot, swap or future; if not provided, self.options['defaultType'] is used
        :param str params['marginMode']: 'cross' or 'isolated' - marginMode for margin trading; if not provided, self.options['defaultMarginMode'] is used
:returns: An array of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
return await self.fetch_orders_by_status('finished', symbol, since, limit, params)
async def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None if (symbol is None) else self.market(symbol)
stop = self.safe_value(params, 'stop')
params = self.omit(params, 'stop')
type, query = self.handle_market_type_and_params('fetchOrdersByStatus', market, params)
spot = (type == 'spot') or (type == 'margin')
request, requestParams = self.multi_order_spot_prepare_request(market, stop, query) if spot else self.prepare_request(market, type, query)
if status == 'closed':
status = 'finished'
request['status'] = status
if limit is not None:
request['limit'] = limit
if since is not None and spot:
request['from'] = int(since / 1000)
methodTail = 'PriceOrders' if stop else 'Orders'
openSpotOrders = spot and (status == 'open') and not stop
if openSpotOrders:
methodTail = 'OpenOrders'
method = self.get_supported_mapping(type, {
'spot': 'privateSpotGet' + methodTail,
'margin': 'privateSpotGet' + methodTail,
'swap': 'privateFuturesGetSettle' + methodTail,
'future': 'privateDeliveryGetSettle' + methodTail,
})
response = await getattr(self, method)(self.extend(request, requestParams))
#
# SPOT Open Orders
#
# [
# {
# "currency_pair": "ADA_USDT",
# "total": 2,
# "orders": [
# {
# "id": "155498539874",
# "text": "apiv4",
# "create_time": "1652406843",
# "update_time": "1652406843",
# "create_time_ms": 1652406843295,
# "update_time_ms": 1652406843295,
# "status": "open",
# "currency_pair": "ADA_USDT",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "3",
# "price": "0.35",
# "time_in_force": "gtc",
# "iceberg": "0",
# "left": "3",
# "fill_price": "0",
# "filled_total": "0",
# "fee": "0",
# "fee_currency": "ADA",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee": "0",
# "rebated_fee_currency": "USDT"
# },
# ...
# ]
# },
# ...
# ]
#
# SPOT
#
# [
# {
# "id": "8834234273",
# "text": "3",
# "create_time": "1635406193",
# "update_time": "1635406193",
# "create_time_ms": 1635406193361,
# "update_time_ms": 1635406193361,
# "status": "closed",
# "currency_pair": "BTC_USDT",
# "type": "limit",
# "account": "spot", # margin for margin orders
# "side": "sell",
# "amount": "0.0002",
# "price": "58904.01",
# "time_in_force": "gtc",
# "iceberg": "0",
# "left": "0.0000",
# "fill_price": "11.790516",
# "filled_total": "11.790516",
# "fee": "0.023581032",
# "fee_currency": "USDT",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee_currency": "BTC"
# }
# ]
#
# Spot Stop
#
# [
# {
# "market": "ADA_USDT",
# "user": 10406147,
# "trigger": {
# "price": "0.65",
# "rule": "\u003c=",
# "expiration": 86400
# },
# "put": {
# "type": "limit",
# "side": "sell",
# "price": "0.65",
# "amount": "2.00000000000000000000",
# "account": "normal", # margin for margin orders
# "time_in_force": "gtc"
# },
# "id": 8449909,
# "ctime": 1652188982,
# "status": "open"
# }
# ]
#
# Perpetual Swap
#
# [
# {
# "status": "finished",
# "size": -1,
# "left": 0,
# "id": 82750739203,
# "is_liq": False,
# "is_close": False,
# "contract": "BTC_USDT",
# "text": "web",
# "fill_price": "60721.3",
# "finish_as": "filled",
# "iceberg": 0,
# "tif": "ioc",
# "is_reduce_only": True,
# "create_time": 1635403475.412,
# "finish_time": 1635403475.4127,
# "price": "0"
# }
# ]
#
result = response
if openSpotOrders:
result = []
for i in range(0, len(response)):
orders = self.safe_value(response[i], 'orders')
result = self.array_concat(result, orders)
orders = self.parse_orders(result, market, since, limit)
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
async def cancel_order(self, id, symbol=None, params={}):
"""
Cancels an open order
:param str id: Order id
:param str symbol: Unified market symbol
:param dict params: Parameters specified by the exchange api
:param bool params['stop']: True if the order to be cancelled is a trigger order
:returns: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = None if (symbol is None) else self.market(symbol)
stop = self.safe_value_2(params, 'is_stop_order', 'stop', False)
params = self.omit(params, ['is_stop_order', 'stop'])
type, query = self.handle_market_type_and_params('cancelOrder', market, params)
request, requestParams = self.spot_order_prepare_request(market, stop, query) if (type == 'spot' or type == 'margin') else self.prepare_request(market, type, query)
request['order_id'] = id
pathMiddle = 'Price' if stop else ''
method = self.get_supported_mapping(type, {
'spot': 'privateSpotDelete' + pathMiddle + 'OrdersOrderId',
'margin': 'privateSpotDelete' + pathMiddle + 'OrdersOrderId',
'swap': 'privateFuturesDeleteSettle' + pathMiddle + 'OrdersOrderId',
'future': 'privateDeliveryDeleteSettle' + pathMiddle + 'OrdersOrderId',
})
response = await getattr(self, method)(self.extend(request, requestParams))
#
# spot
#
# {
# "id": "95282841887",
# "text": "apiv4",
# "create_time": "1637383156",
# "update_time": "1637383235",
# "create_time_ms": 1637383156017,
# "update_time_ms": 1637383235085,
# "status": "cancelled",
# "currency_pair": "ETH_USDT",
# "type": "limit",
# "account": "spot",
# "side": "buy",
# "amount": "0.01",
# "price": "3500",
# "time_in_force": "gtc",
# "iceberg": "0",
# "left": "0.01",
# "fill_price": "0",
# "filled_total": "0",
# "fee": "0",
# "fee_currency": "ETH",
# "point_fee": "0",
# "gt_fee": "0",
# "gt_discount": False,
# "rebated_fee": "0",
# "rebated_fee_currency": "USDT"
# }
#
# spot conditional
#
# {
# "market": "ETH_USDT",
# "user": 2436035,
# "trigger": {
# "price": "3500",
# "rule": "\u003c=",
# "expiration": 86400
# },
# "put": {
# "type": "limit",
# "side": "buy",
# "price": "3500",
# "amount": "0.01000000000000000000",
# "account": "normal",
# "time_in_force": "gtc"
# },
# "id": 5891843,
# "ctime": 1637382379,
# "ftime": 1637382673,
# "status": "canceled"
# }
#
# perpetual swaps
#
# {
# id: "82241928192",
# contract: "BTC_USDT",
# mkfr: "0",
# tkfr: "0.0005",
# tif: "gtc",
# is_reduce_only: False,
# create_time: "1635196145.06",
# finish_time: "1635196233.396",
# price: "61000",
# size: "4",
# refr: "0",
# left: "4",
# text: "web",
# fill_price: "0",
# user: "6693577",
# finish_as: "cancelled",
# status: "finished",
# is_liq: False,
# refu: "0",
# is_close: False,
# iceberg: "0",
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
market = None if (symbol is None) else self.market(symbol)
stop = self.safe_value(params, 'stop')
params = self.omit(params, 'stop')
type, query = self.handle_market_type_and_params('cancelAllOrders', market, params)
request, requestParams = self.multi_order_spot_prepare_request(market, stop, query) if (type == 'spot') else self.prepare_request(market, type, query)
methodTail = 'PriceOrders' if stop else 'Orders'
method = self.get_supported_mapping(type, {
'spot': 'privateSpotDelete' + methodTail,
'margin': 'privateSpotDelete' + methodTail,
'swap': 'privateFuturesDeleteSettle' + methodTail,
'future': 'privateDeliveryDeleteSettle' + methodTail,
})
response = await getattr(self, method)(self.extend(request, requestParams))
#
# [
# {
# "id": 139797004085,
# "contract": "ADA_USDT",
# "mkfr": "0",
# "tkfr": "0.0005",
# "tif": "gtc",
# "is_reduce_only": False,
# "create_time": 1647911169.343,
# "finish_time": 1647911226.849,
# "price": "0.8",
# "size": 1,
# "refr": "0.3",
# "left": 1,
# "text": "api",
# "fill_price": "0",
# "user": 6693577,
# "finish_as": "cancelled",
# "status": "finished",
# "is_liq": False,
# "refu": 2436035,
# "is_close": False,
# "iceberg": 0
# }
# ...
# ]
#
return self.parse_orders(response, market)
async def transfer(self, code, amount, fromAccount, toAccount, params={}):
"""
makes internal transfers of funds between accounts on the same exchange
:param str code: unified currency code for currency being transferred
:param float amount: the amount of currency to transfer
:param str fromAccount: the account to transfer currency from
:param str toAccount: the account to transfer currency to
:param dict params: Exchange specific parameters
:param dict params['symbol']: Unified market symbol *required for type == margin*
:returns: A `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
await self.load_markets()
currency = self.currency(code)
fromId = self.parse_account(fromAccount)
toId = self.parse_account(toAccount)
truncated = self.currency_to_precision(code, amount)
request = {
'currency': currency['id'],
'amount': truncated,
}
if not (fromId in self.options['accountsByType']):
request['from'] = 'margin'
request['currency_pair'] = fromId
else:
request['from'] = fromId
if not (toId in self.options['accountsByType']):
request['to'] = 'margin'
request['currency_pair'] = toId
else:
request['to'] = toId
if fromId == 'margin' or toId == 'margin':
symbol = self.safe_string_2(params, 'symbol', 'currency_pair')
if symbol is None:
                raise ArgumentsRequired(self.id + ' transfer() requires params["symbol"] for isolated margin transfers')
market = self.market(symbol)
request['currency_pair'] = market['id']
params = self.omit(params, 'symbol')
if (toId == 'futures') or (toId == 'delivery') or (fromId == 'futures') or (fromId == 'delivery'):
request['settle'] = currency['lowerCaseId']
response = await self.privateWalletPostTransfers(self.extend(request, params))
#
# according to the docs(however actual response seems to be an empty string '')
#
# {
# "currency": "BTC",
# "from": "spot",
# "to": "margin",
# "amount": "1",
# "currency_pair": "BTC_USDT"
# }
#
transfer = self.parse_transfer(response, currency)
return self.extend(transfer, {
'fromAccount': fromAccount,
'toAccount': toAccount,
'amount': self.parse_number(truncated),
})
def parse_account(self, account):
accountsByType = self.options['accountsByType']
if account in accountsByType:
return accountsByType[account]
elif account in self.markets:
market = self.market(account)
return market['id']
else:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' accounts must be one of ' + ', '.join(keys) + ' or an isolated margin symbol')
def parse_transfer(self, transfer, currency=None):
timestamp = self.milliseconds()
return {
'id': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': self.safe_currency_code(None, currency),
'amount': None,
'fromAccount': None,
'toAccount': None,
'status': None,
'info': transfer,
}
async def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
        if (leverage < 0) or (leverage > 100):
            raise BadRequest(self.id + ' setLeverage() leverage should be between 0 and 100')
await self.load_markets()
market = self.market(symbol)
method = self.get_supported_mapping(market['type'], {
'swap': 'privateFuturesPostSettlePositionsContractLeverage',
'future': 'privateDeliveryPostSettlePositionsContractLeverage',
})
request, query = self.prepare_request(market, None, params)
defaultMarginMode = self.safe_string_2(self.options, 'marginMode', 'defaultMarginMode')
crossLeverageLimit = self.safe_string(query, 'cross_leverage_limit')
marginMode = self.safe_string(query, 'marginMode', defaultMarginMode)
if crossLeverageLimit is not None:
marginMode = 'cross'
leverage = crossLeverageLimit
if marginMode == 'cross' or marginMode == 'cross_margin':
request['query'] = {
'cross_leverage_limit': str(leverage),
'leverage': '0',
}
else:
request['query'] = {
'leverage': str(leverage),
}
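        # e.g. setLeverage(10, symbol) sends {'leverage': '10'} for isolated positions, while passing
        # params={'cross_leverage_limit': 10} switches to cross margin and sends
        # {'cross_leverage_limit': '10', 'leverage': '0'}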
response = await getattr(self, method)(self.extend(request, query))
#
# {
# "value": "0",
# "leverage": "5",
# "mode": "single",
# "realised_point": "0",
# "contract": "BTC_USDT",
# "entry_price": "0",
# "mark_price": "62035.86",
# "history_point": "0",
# "realised_pnl": "0",
# "close_order": null,
# "size": 0,
# "cross_leverage_limit": "0",
# "pending_orders": 0,
# "adl_ranking": 6,
# "maintenance_rate": "0.005",
# "unrealised_pnl": "0",
# "user": 2436035,
# "leverage_max": "100",
# "history_pnl": "0",
# "risk_limit": "1000000",
# "margin": "0",
# "last_close_pnl": "0",
# "liq_price": "0"
# }
#
return response
def parse_position(self, position, market=None):
#
# {
# value: "12.475572",
# leverage: "0",
# mode: "single",
# realised_point: "0",
# contract: "BTC_USDT",
# entry_price: "62422.6",
# mark_price: "62377.86",
# history_point: "0",
# realised_pnl: "-0.00624226",
# close_order: null,
# size: "2",
# cross_leverage_limit: "25",
# pending_orders: "0",
# adl_ranking: "5",
# maintenance_rate: "0.005",
# unrealised_pnl: "-0.008948",
# user: "663337",
# leverage_max: "100",
# history_pnl: "14.98868396636",
# risk_limit: "1000000",
# margin: "0.740721495056",
# last_close_pnl: "-0.041996015",
# liq_price: "59058.58"
# }
#
contract = self.safe_string(position, 'contract')
market = self.safe_market(contract, market)
size = self.safe_string(position, 'size')
side = None
if Precise.string_gt(size, '0'):
side = 'long'
elif Precise.string_lt(size, '0'):
side = 'short'
maintenanceRate = self.safe_string(position, 'maintenance_rate')
notional = self.safe_string(position, 'value')
leverage = self.safe_string(position, 'leverage')
marginMode = None
if leverage == '0':
marginMode = 'cross'
else:
marginMode = 'isolated'
unrealisedPnl = self.safe_string(position, 'unrealised_pnl')
# Initial Position Margin = ( Position Value / Leverage ) + Close Position Fee
# *The default leverage under the full position is the highest leverage in the market.
# *Trading fee is charged as Taker Fee Rate(0.075%).
takerFee = '0.00075'
feePaid = Precise.string_mul(takerFee, notional)
initialMarginString = Precise.string_add(Precise.string_div(notional, leverage), feePaid)
percentage = Precise.string_mul(Precise.string_div(unrealisedPnl, initialMarginString), '100')
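        # worked example(illustrative numbers): with notional = '1000', leverage = '10' and the assumed
        # takerFee = '0.00075', feePaid = 0.75 and initialMargin = 100 + 0.75 = 100.75, so an
        # unrealised pnl of 5 would give percentage = 5 / 100.75 * 100, about 4.96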
return {
'info': position,
'symbol': self.safe_string(market, 'symbol'),
'timestamp': None,
'datetime': None,
'initialMargin': self.parse_number(initialMarginString),
'initialMarginPercentage': self.parse_number(Precise.string_div(initialMarginString, notional)),
'maintenanceMargin': self.parse_number(Precise.string_mul(maintenanceRate, notional)),
'maintenanceMarginPercentage': self.parse_number(maintenanceRate),
'entryPrice': self.safe_number(position, 'entry_price'),
'notional': self.parse_number(notional),
'leverage': self.safe_number(position, 'leverage'),
'unrealizedPnl': self.parse_number(unrealisedPnl),
'contracts': self.parse_number(Precise.string_abs(size)),
'contractSize': self.safe_value(market, 'contractSize'),
# 'realisedPnl': position['realised_pnl'],
'marginRatio': None,
'liquidationPrice': self.safe_number(position, 'liq_price'),
'markPrice': self.safe_number(position, 'mark_price'),
'collateral': self.safe_number(position, 'margin'),
'marginMode': marginMode,
'marginType': marginMode, # deprecated
'side': side,
'percentage': self.parse_number(percentage),
}
def parse_positions(self, positions):
result = []
for i in range(0, len(positions)):
result.append(self.parse_position(positions[i]))
return result
async def fetch_positions(self, symbols=None, params={}):
"""
        Fetch open positions
        :param [str] symbols: Not used by Gateio, but parsed internally by CCXT
:param dict params: exchange specific parameters
:param str params['settle']: 'btc' or 'usdt' - settle currency for perpetual swap and future - default="usdt" for swap and "btc" for future
:param str params['type']: swap or future, if not provided self.options['defaultType'] is used
:returns: An array of `position structures <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
await self.load_markets()
type, query = self.handle_market_type_and_params('fetchPositions', None, params)
request, requestParams = self.prepare_request(None, type, query)
method = self.get_supported_mapping(type, {
'swap': 'privateFuturesGetSettlePositions',
'future': 'privateDeliveryGetSettlePositions',
})
response = await getattr(self, method)(self.extend(request, requestParams))
#
# [
# {
# value: "12.475572",
# leverage: "0",
# mode: "single",
# realised_point: "0",
# contract: "BTC_USDT",
# entry_price: "62422.6",
# mark_price: "62377.86",
# history_point: "0",
# realised_pnl: "-0.00624226",
# close_order: null,
# size: "2",
# cross_leverage_limit: "25",
# pending_orders: "0",
# adl_ranking: "5",
# maintenance_rate: "0.005",
# unrealised_pnl: "-0.008948",
# user: "6693577",
# leverage_max: "100",
# history_pnl: "14.98868396636",
# risk_limit: "1000000",
# margin: "0.740721495056",
# last_close_pnl: "-0.041996015",
# liq_price: "59058.58"
# }
# ]
#
result = self.parse_positions(response)
return self.filter_by_array(result, 'symbol', symbols, False)
async def fetch_leverage_tiers(self, symbols=None, params={}):
await self.load_markets()
type, query = self.handle_market_type_and_params('fetchLeverageTiers', None, params)
request, requestParams = self.prepare_request(None, type, query)
if type != 'future' and type != 'swap':
raise BadRequest(self.id + ' fetchLeverageTiers only supports swap and future')
method = self.get_supported_mapping(type, {
'swap': 'publicFuturesGetSettleContracts',
'future': 'publicDeliveryGetSettleContracts',
})
response = await getattr(self, method)(self.extend(request, requestParams))
#
# Perpetual swap
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
# Delivery Futures
#
# [
# {
# "name": "BTC_USDT_20200814",
# "underlying": "BTC_USDT",
# "cycle": "WEEKLY",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "mark_type": "index",
# "last_price": "9017",
# "mark_price": "9019",
# "index_price": "9005.3",
# "basis_rate": "0.185095",
# "basis_value": "13.7",
# "basis_impact_value": "100000",
# "settle_price": "0",
# "settle_price_interval": 60,
# "settle_price_duration": 1800,
# "settle_fee_rate": "0.0015",
# "expire_time": 1593763200,
# "order_price_round": "0.1",
# "mark_price_round": "0.1",
# "leverage_min": "1",
# "leverage_max": "100",
# "maintenance_rate": "1000000",
# "risk_limit_base": "140.726652109199",
# "risk_limit_step": "1000000",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "ref_discount_rate": "0",
# "ref_rebate_rate": "0.2",
# "order_price_deviate": "0.5",
# "order_size_min": 1,
# "order_size_max": 1000000,
# "orders_limit": 50,
# "orderbook_id": 63,
# "trade_id": 26,
# "trade_size": 435,
# "position_size": 130,
# "config_change_time": 1593158867,
# "in_delisting": False
# }
# ]
#
return self.parse_leverage_tiers(response, symbols, 'name')
def parse_market_leverage_tiers(self, info, market=None):
"""
* @ignore
https://www.gate.io/help/futures/perpetual/22162/instrctions-of-risk-limit
:param dict info: Exchange market response for 1 market
:param dict market: CCXT market
"""
#
# Perpetual swap
#
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
#
# Delivery Futures
#
# {
# "name": "BTC_USDT_20200814",
# "underlying": "BTC_USDT",
# "cycle": "WEEKLY",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "mark_type": "index",
# "last_price": "9017",
# "mark_price": "9019",
# "index_price": "9005.3",
# "basis_rate": "0.185095",
# "basis_value": "13.7",
# "basis_impact_value": "100000",
# "settle_price": "0",
# "settle_price_interval": 60,
# "settle_price_duration": 1800,
# "settle_fee_rate": "0.0015",
# "expire_time": 1593763200,
# "order_price_round": "0.1",
# "mark_price_round": "0.1",
# "leverage_min": "1",
# "leverage_max": "100",
# "maintenance_rate": "1000000",
# "risk_limit_base": "140.726652109199",
# "risk_limit_step": "1000000",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "ref_discount_rate": "0",
# "ref_rebate_rate": "0.2",
# "order_price_deviate": "0.5",
# "order_size_min": 1,
# "order_size_max": 1000000,
# "orders_limit": 50,
# "orderbook_id": 63,
# "trade_id": 26,
# "trade_size": 435,
# "position_size": 130,
# "config_change_time": 1593158867,
# "in_delisting": False
# }
#
maintenanceMarginUnit = self.safe_string(info, 'maintenance_rate') # '0.005',
leverageMax = self.safe_string(info, 'leverage_max') # '100',
riskLimitStep = self.safe_string(info, 'risk_limit_step') # '1000000',
riskLimitMax = self.safe_string(info, 'risk_limit_max') # '16000000',
initialMarginUnit = Precise.string_div('1', leverageMax)
maintenanceMarginRate = maintenanceMarginUnit
initialMarginRatio = initialMarginUnit
floor = '0'
tiers = []
        while Precise.string_lt(floor, riskLimitMax):
cap = Precise.string_add(floor, riskLimitStep)
tiers.append({
'tier': self.parse_number(Precise.string_div(cap, riskLimitStep)),
'currency': self.safe_string(market, 'settle'),
'minNotional': self.parse_number(floor),
'maxNotional': self.parse_number(cap),
'maintenanceMarginRate': self.parse_number(maintenanceMarginRate),
'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRatio)),
'info': info,
})
maintenanceMarginRate = Precise.string_add(maintenanceMarginRate, maintenanceMarginUnit)
initialMarginRatio = Precise.string_add(initialMarginRatio, initialMarginUnit)
floor = cap
return tiers
def sign(self, path, api=[], method='GET', params={}, headers=None, body=None):
authentication = api[0] # public, private
type = api[1] # spot, margin, future, delivery
query = self.omit(params, self.extract_params(path))
path = self.implode_params(path, params)
endPart = '' if (path == '') else ('/' + path)
entirePath = '/' + type + endPart
url = self.urls['api'][authentication][type]
if url is None:
raise NotSupported(self.id + ' does not have a testnet for the ' + type + ' market type.')
url += entirePath
if authentication == 'public':
if query:
url += '?' + self.urlencode(query)
else:
queryString = ''
if (method == 'GET') or (method == 'DELETE'):
if query:
queryString = self.urlencode(query)
url += '?' + queryString
else:
urlQueryParams = self.safe_value(query, 'query', {})
if urlQueryParams:
queryString = self.urlencode(urlQueryParams)
url += '?' + queryString
query = self.omit(query, 'query')
body = self.json(query)
bodyPayload = '' if (body is None) else body
bodySignature = self.hash(self.encode(bodyPayload), 'sha512')
timestamp = self.seconds()
timestampString = str(timestamp)
signaturePath = '/api/' + self.version + entirePath
payloadArray = [method.upper(), signaturePath, queryString, bodySignature, timestampString]
# eslint-disable-next-line quotes
payload = "\n".join(payloadArray)
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512)
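        # illustrative signature payload(assumed values): for GET /api/v4/spot/accounts with no query
        # and no body, the signed string is "GET\n/api/v4/spot/accounts\n\n" + sha512('') + "\n" + timestamp,
        # hmac-sha512'd with the api secret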
headers = {
'KEY': self.apiKey,
'Timestamp': timestampString,
'SIGN': signature,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"label": "ORDER_NOT_FOUND", "message": "Order not found"}
# {"label": "INVALID_PARAM_VALUE", "message": "invalid argument: status"}
# {"label": "INVALID_PARAM_VALUE", "message": "invalid argument: Trigger.rule"}
# {"label": "INVALID_PARAM_VALUE", "message": "invalid argument: trigger.expiration invalid range"}
# {"label": "INVALID_ARGUMENT", "detail": "invalid size"}
#
label = self.safe_string(response, 'label')
if label is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], label, feedback)
raise ExchangeError(feedback)
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountNotEnabled
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class gateio(Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': ['KR'],
'rateLimit': 10 / 3, 'version': 'v4',
'certified': True,
'pro': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'doc': 'https://www.gate.io/docs/apiv4/en/index.html',
'www': 'https://gate.io/',
'api': {
'public': {
'wallet': 'https://api.gateio.ws/api/v4',
'futures': 'https://api.gateio.ws/api/v4',
'margin': 'https://api.gateio.ws/api/v4',
'delivery': 'https://api.gateio.ws/api/v4',
'spot': 'https://api.gateio.ws/api/v4',
'options': 'https://api.gateio.ws/api/v4',
},
'private': {
'withdrawals': 'https://api.gateio.ws/api/v4',
'wallet': 'https://api.gateio.ws/api/v4',
'futures': 'https://api.gateio.ws/api/v4',
'margin': 'https://api.gateio.ws/api/v4',
'delivery': 'https://api.gateio.ws/api/v4',
'spot': 'https://api.gateio.ws/api/v4',
'options': 'https://api.gateio.ws/api/v4',
},
},
'test': {
'public': {
'futures': 'https://fx-api-testnet.gateio.ws/api/v4',
'delivery': 'https://fx-api-testnet.gateio.ws/api/v4',
},
'private': {
'futures': 'https://fx-api-testnet.gateio.ws/api/v4',
'delivery': 'https://fx-api-testnet.gateio.ws/api/v4',
},
},
'referral': {
'url': 'https://www.gate.io/ref/2436035',
'discount': 0.2,
},
},
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': None,
'cancelAllOrders': True,
'cancelOrder': True,
'createMarketOrder': False,
'createOrder': True,
'createPostOnlyOrder': True,
'createStopLimitOrder': True,
'createStopMarketOrder': False,
'createStopOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchLeverage': False,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': 'emulated',
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchNetworkDepositAddress': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': False,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchTransactionFees': True,
'fetchWithdrawals': True,
'setLeverage': True,
'setMarginMode': False,
'transfer': True,
'withdraw': True,
},
'api': {
'public': {
'wallet': {
'get': {
'wallet/currency_chains': 1.5,
},
},
'spot': {
'get': {
'currencies': 1,
'currencies/{currency}': 1,
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'tickers': 1,
'order_book': 1,
'trades': 1,
'candlesticks': 1,
},
},
'margin': {
'get': {
'currency_pairs': 1,
'currency_pairs/{currency_pair}': 1,
'cross/currencies': 1,
'cross/currencies/{currency}': 1,
'funding_book': 1,
},
},
'futures': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/funding_rate': 1.5,
'{settle}/insurance': 1.5,
'{settle}/contract_stats': 1.5,
'{settle}/liq_orders': 1.5,
},
},
'delivery': {
'get': {
'{settle}/contracts': 1.5,
'{settle}/contracts/{contract}': 1.5,
'{settle}/order_book': 1.5,
'{settle}/trades': 1.5,
'{settle}/candlesticks': 1.5,
'{settle}/tickers': 1.5,
'{settle}/insurance': 1.5,
},
},
'options': {
'get': {
'underlyings': 1.5,
'expirations': 1.5,
'contracts': 1.5,
'contracts/{contract}': 1.5,
'settlements': 1.5,
'settlements/{contract}': 1.5,
'order_book': 1.5,
'tickers': 1.5,
'underlying/tickers/{underlying}': 1.5,
'candlesticks': 1.5,
'underlying/candlesticks': 1.5,
'trades': 1.5,
},
},
},
'private': {
'withdrawals': {
'post': {
                            '': 3000,
                        },
'delete': {
'{withdrawal_id}': 300,
},
},
'wallet': {
'get': {
'deposit_address': 300,
'withdrawals': 300,
'deposits': 300,
'sub_account_transfers': 300,
'withdraw_status': 300,
'sub_account_balances': 300,
'fee': 300,
'total_balance': 300,
},
'post': {
'transfers': 300,
'sub_account_transfers': 300,
},
},
'spot': {
'get': {
'accounts': 1,
'open_orders': 1,
'orders': 1,
'orders/{order_id}': 1,
'my_trades': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
'post': {
'batch_orders': 1,
'orders': 1,
'cancel_batch_orders': 1,
'price_orders': 1,
},
'delete': {
'orders': 1,
'orders/{order_id}': 1,
'price_orders': 1,
'price_orders/{order_id}': 1,
},
},
'margin': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'funding_accounts': 1.5,
'loans': 1.5,
'loans/{loan_id}': 1.5,
'loans/{loan_id}/repayment': 1.5,
'loan_records': 1.5,
'loan_records/{load_record_id}': 1.5,
'auto_repay': 1.5,
'transferable': 1.5,
'cross/accounts': 1.5,
'cross/account_book': 1.5,
'cross/loans': 1.5,
'cross/loans/{loan_id}': 1.5,
'cross/loans/repayments': 1.5,
'cross/transferable': 1.5,
'loan_records/{loan_record_id}': 1.5,
'borrowable': 1.5,
'cross/repayments': 1.5,
'cross/borrowable': 1.5,
},
'post': {
'loans': 1.5,
'merged_loans': 1.5,
'loans/{loan_id}/repayment': 1.5,
'auto_repay': 1.5,
'cross/loans': 1.5,
'cross/loans/repayments': 1.5,
'cross/repayments': 1.5,
},
'patch': {
'loans/{loan_id}': 1.5,
'loan_records/{loan_record_id}': 1.5,
},
'delete': {
'loans/{loan_id}': 1.5,
},
},
'futures': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/dual_mode': 1.5,
'{settle}/dual_comp/positions/{contract}': 1.5,
'{settle}/dual_comp/positions/{contract}/margin': 1.5,
'{settle}/dual_comp/positions/{contract}/leverage': 1.5,
'{settle}/dual_comp/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'delivery': {
'get': {
'{settle}/accounts': 1.5,
'{settle}/account_book': 1.5,
'{settle}/positions': 1.5,
'{settle}/positions/{contract}': 1.5,
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/my_trades': 1.5,
'{settle}/position_close': 1.5,
'{settle}/liquidates': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
'{settle}/settlements': 1.5,
},
'post': {
'{settle}/positions/{contract}/margin': 1.5,
'{settle}/positions/{contract}/leverage': 1.5,
'{settle}/positions/{contract}/risk_limit': 1.5,
'{settle}/orders': 1.5,
'{settle}/price_orders': 1.5,
},
'delete': {
'{settle}/orders': 1.5,
'{settle}/orders/{order_id}': 1.5,
'{settle}/price_orders': 1.5,
'{settle}/price_orders/{order_id}': 1.5,
},
},
'options': {
'get': {
'accounts': 1.5,
'account_book': 1.5,
'positions': 1.5,
'positions/{contract}': 1.5,
'position_close': 1.5,
'orders': 1.5,
'orders/{order_id}': 1.5,
'my_trades': 1.5,
},
'post': {
'orders': 1.5,
},
'delete': {
'orders': 1.5,
'orders/{order_id}': 1.5,
},
},
},
},
'timeframes': {
'10s': '10s',
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'8h': '8h',
'1d': '1d',
'7d': '7d',
'1w': '7d',
},
'commonCurrencies': {
'88MPH': 'MPH',
'AXIS': 'Axis DeFi',
'BIFI': 'Bitcoin File',
'BOX': 'DefiBox',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'BeyondFi',
'EGG': 'Goose Finance',
                'GTC': 'Game.com',
                'GTC_HT': 'Game.com HT',
'GTC_BSC': 'Game.com BSC',
'HIT': 'HitChain',
                'MM': 'Million',
                'MPH': 'Morpher',
                'RAI': 'Rai Reflex Index',
                'SBTC': 'Super Bitcoin',
'TNC': 'Trinity Network Credit',
'TON': 'TONToken',
'VAI': 'VAIOT',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'headers': {
'X-Gate-Channel-Id': 'ccxt',
},
'options': {
'createOrder': {
                    'expiration': 86400,
                },
'networks': {
'TRC20': 'TRX',
'ERC20': 'ETH',
'BEP20': 'BSC',
},
'accountsByType': {
'funding': 'spot',
'spot': 'spot',
'margin': 'margin',
'cross_margin': 'cross_margin',
'cross': 'cross_margin',
'isolated': 'margin',
'swap': 'futures',
'future': 'delivery',
'futures': 'futures',
'delivery': 'delivery',
},
'defaultType': 'spot',
'swap': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
'future': {
'fetchMarkets': {
'settlementCurrencies': ['usdt', 'btc'],
},
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': True,
'feeSide': 'get',
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
'tiers': {
'maker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00185')],
[self.parse_number('3'), self.parse_number('0.00175')],
[self.parse_number('6'), self.parse_number('0.00165')],
[self.parse_number('12.5'), self.parse_number('0.00155')],
[self.parse_number('25'), self.parse_number('0.00145')],
[self.parse_number('75'), self.parse_number('0.00135')],
[self.parse_number('200'), self.parse_number('0.00125')],
[self.parse_number('500'), self.parse_number('0.00115')],
[self.parse_number('1250'), self.parse_number('0.00105')],
[self.parse_number('2500'), self.parse_number('0.00095')],
[self.parse_number('3000'), self.parse_number('0.00085')],
[self.parse_number('6000'), self.parse_number('0.00075')],
[self.parse_number('11000'), self.parse_number('0.00065')],
[self.parse_number('20000'), self.parse_number('0.00055')],
[self.parse_number('40000'), self.parse_number('0.00055')],
[self.parse_number('75000'), self.parse_number('0.00055')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.002')],
[self.parse_number('1.5'), self.parse_number('0.00195')],
[self.parse_number('3'), self.parse_number('0.00185')],
[self.parse_number('6'), self.parse_number('0.00175')],
[self.parse_number('12.5'), self.parse_number('0.00165')],
[self.parse_number('25'), self.parse_number('0.00155')],
[self.parse_number('75'), self.parse_number('0.00145')],
[self.parse_number('200'), self.parse_number('0.00135')],
[self.parse_number('500'), self.parse_number('0.00125')],
[self.parse_number('1250'), self.parse_number('0.00115')],
[self.parse_number('2500'), self.parse_number('0.00105')],
[self.parse_number('3000'), self.parse_number('0.00095')],
[self.parse_number('6000'), self.parse_number('0.00085')],
[self.parse_number('11000'), self.parse_number('0.00075')],
[self.parse_number('20000'), self.parse_number('0.00065')],
[self.parse_number('40000'), self.parse_number('0.00065')],
[self.parse_number('75000'), self.parse_number('0.00065')],
],
},
},
'swap': {
'tierBased': True,
'feeSide': 'base',
'percentage': True,
'maker': self.parse_number('0.0'),
'taker': self.parse_number('0.0005'),
'tiers': {
'maker': [
[self.parse_number('0'), self.parse_number('0.0000')],
[self.parse_number('1.5'), self.parse_number('-0.00005')],
[self.parse_number('3'), self.parse_number('-0.00005')],
[self.parse_number('6'), self.parse_number('-0.00005')],
[self.parse_number('12.5'), self.parse_number('-0.00005')],
[self.parse_number('25'), self.parse_number('-0.00005')],
[self.parse_number('75'), self.parse_number('-0.00005')],
[self.parse_number('200'), self.parse_number('-0.00005')],
[self.parse_number('500'), self.parse_number('-0.00005')],
[self.parse_number('1250'), self.parse_number('-0.00005')],
[self.parse_number('2500'), self.parse_number('-0.00005')],
[self.parse_number('3000'), self.parse_number('-0.00008')],
[self.parse_number('6000'), self.parse_number('-0.01000')],
[self.parse_number('11000'), self.parse_number('-0.01002')],
[self.parse_number('20000'), self.parse_number('-0.01005')],
[self.parse_number('40000'), self.parse_number('-0.02000')],
[self.parse_number('75000'), self.parse_number('-0.02005')],
],
'taker': [
[self.parse_number('0'), self.parse_number('0.00050')],
[self.parse_number('1.5'), self.parse_number('0.00048')],
[self.parse_number('3'), self.parse_number('0.00046')],
[self.parse_number('6'), self.parse_number('0.00044')],
[self.parse_number('12.5'), self.parse_number('0.00042')],
[self.parse_number('25'), self.parse_number('0.00040')],
[self.parse_number('75'), self.parse_number('0.00038')],
[self.parse_number('200'), self.parse_number('0.00036')],
[self.parse_number('500'), self.parse_number('0.00034')],
[self.parse_number('1250'), self.parse_number('0.00032')],
[self.parse_number('2500'), self.parse_number('0.00030')],
[self.parse_number('3000'), self.parse_number('0.00030')],
[self.parse_number('6000'), self.parse_number('0.00030')],
[self.parse_number('11000'), self.parse_number('0.00030')],
[self.parse_number('20000'), self.parse_number('0.00030')],
[self.parse_number('40000'), self.parse_number('0.00030')],
[self.parse_number('75000'), self.parse_number('0.00030')],
],
},
},
},
'exceptions': {
'exact': {
'INVALID_PARAM_VALUE': BadRequest,
'INVALID_PROTOCOL': BadRequest,
'INVALID_ARGUMENT': BadRequest,
'INVALID_REQUEST_BODY': BadRequest,
'MISSING_REQUIRED_PARAM': ArgumentsRequired,
'BAD_REQUEST': BadRequest,
'INVALID_CONTENT_TYPE': BadRequest,
'NOT_ACCEPTABLE': BadRequest,
'METHOD_NOT_ALLOWED': BadRequest,
'NOT_FOUND': ExchangeError,
'INVALID_CREDENTIALS': AuthenticationError,
'INVALID_KEY': AuthenticationError,
'IP_FORBIDDEN': AuthenticationError,
'READ_ONLY': PermissionDenied,
'INVALID_SIGNATURE': AuthenticationError,
'MISSING_REQUIRED_HEADER': AuthenticationError,
'REQUEST_EXPIRED': AuthenticationError,
'ACCOUNT_LOCKED': AccountSuspended,
'FORBIDDEN': PermissionDenied,
'SUB_ACCOUNT_NOT_FOUND': ExchangeError,
'SUB_ACCOUNT_LOCKED': AccountSuspended,
'MARGIN_BALANCE_EXCEPTION': ExchangeError,
'MARGIN_TRANSFER_FAILED': ExchangeError,
'TOO_MUCH_FUTURES_AVAILABLE': ExchangeError,
'FUTURES_BALANCE_NOT_ENOUGH': InsufficientFunds,
'ACCOUNT_EXCEPTION': ExchangeError,
'SUB_ACCOUNT_TRANSFER_FAILED': ExchangeError,
'ADDRESS_NOT_USED': ExchangeError,
'TOO_FAST': RateLimitExceeded,
'WITHDRAWAL_OVER_LIMIT': ExchangeError,
'API_WITHDRAW_DISABLED': ExchangeNotAvailable,
'INVALID_WITHDRAW_ID': ExchangeError,
'INVALID_WITHDRAW_CANCEL_STATUS': ExchangeError,
'INVALID_PRECISION': InvalidOrder,
'INVALID_CURRENCY': BadSymbol,
'INVALID_CURRENCY_PAIR': BadSymbol,
'POC_FILL_IMMEDIATELY': ExchangeError,
'ORDER_NOT_FOUND': OrderNotFound,
'CLIENT_ID_NOT_FOUND': OrderNotFound,
'ORDER_CLOSED': InvalidOrder,
'ORDER_CANCELLED': InvalidOrder,
'QUANTITY_NOT_ENOUGH': InvalidOrder,
'BALANCE_NOT_ENOUGH': InsufficientFunds,
'MARGIN_NOT_SUPPORTED': InvalidOrder,
'MARGIN_BALANCE_NOT_ENOUGH': InsufficientFunds,
'AMOUNT_TOO_LITTLE': InvalidOrder,
'AMOUNT_TOO_MUCH': InvalidOrder,
'REPEATED_CREATION': InvalidOrder,
'LOAN_NOT_FOUND': OrderNotFound,
'LOAN_RECORD_NOT_FOUND': OrderNotFound,
'NO_MATCHED_LOAN': ExchangeError,
'NOT_MERGEABLE': ExchangeError,
'NO_CHANGE': ExchangeError,
'REPAY_TOO_MUCH': ExchangeError,
'TOO_MANY_CURRENCY_PAIRS': InvalidOrder,
'TOO_MANY_ORDERS': InvalidOrder,
'MIXED_ACCOUNT_TYPE': InvalidOrder,
'AUTO_BORROW_TOO_MUCH': ExchangeError,
'TRADE_RESTRICTED': InsufficientFunds,
'USER_NOT_FOUND': AccountNotEnabled,
'CONTRACT_NO_COUNTER': ExchangeError,
'CONTRACT_NOT_FOUND': BadSymbol,
'RISK_LIMIT_EXCEEDED': ExchangeError,
'INSUFFICIENT_AVAILABLE': InsufficientFunds,
'LIQUIDATE_IMMEDIATELY': InvalidOrder,
'LEVERAGE_TOO_HIGH': InvalidOrder,
'LEVERAGE_TOO_LOW': InvalidOrder,
'ORDER_NOT_OWNED': ExchangeError,
'ORDER_FINISHED': ExchangeError,
'POSITION_CROSS_MARGIN': ExchangeError,
'POSITION_IN_LIQUIDATION': ExchangeError,
'POSITION_IN_CLOSE': ExchangeError,
'POSITION_EMPTY': InvalidOrder,
'REMOVE_TOO_MUCH': ExchangeError,
'RISK_LIMIT_NOT_MULTIPLE': ExchangeError,
'RISK_LIMIT_TOO_HIGH': ExchangeError,
'RISK_LIMIT_TOO_lOW': ExchangeError,
'PRICE_TOO_DEVIATED': InvalidOrder,
'SIZE_TOO_LARGE': InvalidOrder,
'SIZE_TOO_SMALL': InvalidOrder,
'PRICE_OVER_LIQUIDATION': InvalidOrder,
'PRICE_OVER_BANKRUPT': InvalidOrder,
'ORDER_POC_IMMEDIATE': InvalidOrder,
'INCREASE_POSITION': InvalidOrder,
'CONTRACT_IN_DELISTING': ExchangeError,
'INTERNAL': ExchangeNotAvailable,
'SERVER_ERROR': ExchangeNotAvailable,
'TOO_BUSY': ExchangeNotAvailable,
'CROSS_ACCOUNT_NOT_FOUND': ExchangeError,
},
},
'broad': {},
})
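    # Usage sketch, kept as comments so that importing this module is unaffected.
    # The async entry point and credential keys below are standard ccxt
    # conventions rather than anything defined in this file:
    #
    #     import asyncio
    #     import ccxt.async_support as ccxt
    #
    #     async def main():
    #         exchange = ccxt.gateio({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    #         await exchange.load_markets()
    #         print(exchange.id, len(exchange.symbols))
    #         await exchange.close()
    #
    #     asyncio.run(main())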
async def fetch_markets(self, params={}):
result = []
type, query = self.handle_market_type_and_params('fetchMarkets', None, params)
if type == 'spot' or type == 'margin':
result = await self.fetch_spot_markets(query)
if type == 'swap' or type == 'future':
            result = await self.fetch_contract_markets(query)
        if type == 'option':
result = await self.fetch_option_markets(query)
resultLength = len(result)
if resultLength == 0:
            raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to " + "'spot', 'margin', 'swap', 'future' or 'option'")
        return result
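    # Comment-only sketch: the market type resolved above defaults to
    # options['defaultType'], so swap-only markets can be fetched like this
    # (assuming an `exchange` instance as in the sketch above):
    #
    #     exchange.options['defaultType'] = 'swap'
    #     markets = await exchange.fetch_markets()  # perpetual swaps only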
async def fetch_spot_markets(self, params):
marginResponse = await self.publicMarginGetCurrencyPairs(params)
spotMarketsResponse = await self.publicSpotGetCurrencyPairs(params)
marginMarkets = self.index_by(marginResponse, 'id')
result = []
for i in range(0, len(spotMarketsResponse)):
spotMarket = spotMarketsResponse[i]
id = self.safe_string(spotMarket, 'id')
marginMarket = self.safe_value(marginMarkets, id)
market = self.deep_extend(marginMarket, spotMarket)
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
takerPercent = self.safe_string(market, 'fee')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
amountPrecisionString = self.safe_string(market, 'amount_precision')
pricePrecisionString = self.safe_string(market, 'precision')
tradeStatus = self.safe_string(market, 'trade_status')
leverage = self.safe_number(market, 'leverage')
defaultMinAmountLimit = self.parse_number(self.parse_precision(amountPrecisionString))
margin = leverage is not None
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': margin,
'swap': False,
'future': False,
'option': False,
'active': (tradeStatus == 'tradable'),
'contract': False,
'linear': None,
'inverse': None,
'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(amountPrecisionString)),
'price': self.parse_number(self.parse_precision(pricePrecisionString)),
},
'limits': {
'leverage': {
'min': self.parse_number('1'),
'max': self.safe_number(market, 'leverage', 1),
},
'amount': {
'min': self.safe_number(spotMarket, 'min_base_amount', defaultMinAmountLimit),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_quote_amount'),
'max': self.safe_number(market, 'max_quote_amount'),
},
},
'info': market,
})
return result
async def fetch_contract_markets(self, params):
result = []
swapSettlementCurrencies = self.get_settlement_currencies('swap', 'fetchMarkets')
futureSettlementCurrencies = self.get_settlement_currencies('future', 'fetchMarkets')
for c in range(0, len(swapSettlementCurrencies)):
settleId = swapSettlementCurrencies[c]
query = params
query['settle'] = settleId
response = await self.publicFuturesGetSettleContracts(query)
for i in range(0, len(response)):
parsedMarket = self.parse_contract_market(response[i], settleId)
result.append(parsedMarket)
for c in range(0, len(futureSettlementCurrencies)):
settleId = futureSettlementCurrencies[c]
query = params
query['settle'] = settleId
response = await self.publicDeliveryGetSettleContracts(query)
for i in range(0, len(response)):
parsedMarket = self.parse_contract_market(response[i], settleId)
result.append(parsedMarket)
return result
def parse_contract_market(self, market, settleId):
id = self.safe_string(market, 'name')
parts = id.split('_')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
date = self.safe_string(parts, 2)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
expiry = self.safe_timestamp(market, 'expire_time')
symbol = ''
marketType = 'swap'
if date is not None:
symbol = base + '/' + quote + ':' + settle + '-' + self.yymmdd(expiry, '')
marketType = 'future'
else:
symbol = base + '/' + quote + ':' + settle
priceDeviate = self.safe_string(market, 'order_price_deviate')
markPrice = self.safe_string(market, 'mark_price')
minMultiplier = Precise.string_sub('1', priceDeviate)
maxMultiplier = Precise.string_add('1', priceDeviate)
minPrice = Precise.string_mul(minMultiplier, markPrice)
maxPrice = Precise.string_mul(maxMultiplier, markPrice)
takerPercent = self.safe_string(market, 'taker_fee_rate')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
isLinear = quote == settle
return {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': marketType,
'spot': False,
'margin': False,
'swap': marketType == 'swap',
'future': marketType == 'future',
'option': marketType == 'option',
'active': True,
'contract': True,
'linear': isLinear,
'inverse': not isLinear,
            'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
            'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': self.safe_number(market, 'quanto_multiplier'),
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number('1'),
'price': self.safe_number(market, 'order_price_round'),
},
'limits': {
'leverage': {
'min': self.safe_number(market, 'leverage_min'),
'max': self.safe_number(market, 'leverage_max'),
},
'amount': {
'min': self.safe_number(market, 'order_size_min'),
'max': self.safe_number(market, 'order_size_max'),
},
'price': {
'min': self.parse_number(minPrice),
'max': self.parse_number(maxPrice),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
}
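    # Unified symbol formats produced by the branches above, with an
    # illustrative expiry date:
    #     swap:   base + '/' + quote + ':' + settle        e.g. 'BTC/USDT:USDT'
    #     future: the swap symbol + '-' + yymmdd(expiry)   e.g. 'BTC/USDT:USDT-221230'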
async def fetch_option_markets(self, params={}):
result = []
underlyings = await self.fetch_option_underlyings()
for i in range(0, len(underlyings)):
underlying = underlyings[i]
query = params
query['underlying'] = underlying
response = await self.publicOptionsGetContracts(query)
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'name')
parts = underlying.split('_')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
expiry = self.safe_timestamp(market, 'expiration_time')
strike = self.safe_string(market, 'strike_price')
isCall = self.safe_value(market, 'is_call')
optionLetter = 'C' if isCall else 'P'
optionType = 'call' if isCall else 'put'
symbol = symbol + ':' + quote + '-' + self.yymmdd(expiry) + ':' + strike + ':' + optionLetter
priceDeviate = self.safe_string(market, 'order_price_deviate')
markPrice = self.safe_string(market, 'mark_price')
minMultiplier = Precise.string_sub('1', priceDeviate)
maxMultiplier = Precise.string_add('1', priceDeviate)
minPrice = Precise.string_mul(minMultiplier, markPrice)
maxPrice = Precise.string_mul(maxMultiplier, markPrice)
takerPercent = self.safe_string(market, 'taker_fee_rate')
makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': quote,
'baseId': baseId,
'quoteId': quoteId,
'settleId': quoteId,
'type': 'option',
'spot': False,
'margin': False,
'swap': False,
'future': False,
'option': True,
'active': True,
'contract': True,
'linear': True,
'inverse': False,
                'taker': self.parse_number(Precise.string_div(takerPercent, '100')),
                'maker': self.parse_number(Precise.string_div(makerPercent, '100')),
'contractSize': self.parse_number('1'),
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': strike,
'optionType': optionType,
'precision': {
'amount': self.parse_number('1'),
'price': self.safe_number(market, 'order_price_round'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'order_size_min'),
'max': self.safe_number(market, 'order_size_max'),
},
'price': {
'min': self.parse_number(minPrice),
'max': self.parse_number(maxPrice),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
async def fetch_option_underlyings(self):
underlyingsResponse = await self.publicOptionsGetUnderlyings()
underlyings = []
for i in range(0, len(underlyingsResponse)):
underlying = underlyingsResponse[i]
name = self.safe_string(underlying, 'name')
if name is not None:
underlyings.append(name)
return underlyings
def prepare_request(self, market=None, type=None, params={}):
request = {}
if market is not None:
if market['contract']:
request['contract'] = market['id']
request['settle'] = market['settleId']
else:
request['currency_pair'] = market['id']
else:
swap = type == 'swap'
future = type == 'future'
if swap or future:
defaultSettle = 'usdt' if swap else 'btc'
settle = self.safe_string_lower(params, 'settle', defaultSettle)
params = self.omit(params, 'settle')
request['settle'] = settle
return [request, params]
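    # Request shapes produced by prepare_request (illustrative values):
    #     spot/margin market:     {'currency_pair': 'BTC_USDT'}
    #     contract market:        {'contract': 'BTC_USDT', 'settle': 'usdt'}
    #     no market, type 'swap': {'settle': 'usdt'}  # from params or the default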
def spot_order_prepare_request(self, market=None, stop=False, params={}):
marginMode, query = self.get_margin_mode(stop, params)
request = {}
if not stop:
if market is None:
raise ArgumentsRequired(self.id + ' spotOrderPrepareRequest() requires a market argument for non-stop orders')
request['account'] = marginMode
            request['currency_pair'] = market['id']
        return [request, query]
def multi_order_spot_prepare_request(self, market=None, stop=False, params={}):
marginMode, query = self.get_margin_mode(stop, params)
request = {
'account': marginMode,
}
if market is not None:
if stop:
request['market'] = market['id']
else:
request['currency_pair'] = market['id']
return [request, query]
def get_margin_mode(self, stop, params):
        defaultMarginMode = self.safe_string_lower_2(self.options, 'defaultMarginMode', 'marginMode', 'spot')
        marginMode = self.safe_string_lower_2(params, 'marginMode', 'account', defaultMarginMode)
params = self.omit(params, ['marginMode', 'account'])
if marginMode == 'cross':
marginMode = 'cross_margin'
elif marginMode == 'isolated':
marginMode = 'margin'
elif marginMode == '':
marginMode = 'spot'
if stop:
if marginMode == 'spot':
# gateio spot stop orders use the term normal instead of spot
marginMode = 'normal'
if marginMode == 'cross_margin':
raise BadRequest(self.id + ' getMarginMode() does not support stop orders for cross margin')
return [marginMode, params]
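    # get_margin_mode maps user-facing marginMode values onto API account names:
    #     'cross'    -> 'cross_margin'
    #     'isolated' -> 'margin'
    #     '' / unset -> 'spot' (or 'normal' for spot stop orders)
    # Comment-only example:
    #     marginMode, params = self.get_margin_mode(False, {'marginMode': 'cross'})
    #     # marginMode == 'cross_margin'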
def get_settlement_currencies(self, type, method):
options = self.safe_value(self.options, type, {}) # ['BTC', 'USDT'] unified codes
fetchMarketsContractOptions = self.safe_value(options, method, {})
defaultSettle = ['usdt'] if (type == 'swap') else ['btc']
return self.safe_value(fetchMarketsContractOptions, 'settlementCurrencies', defaultSettle)
async def fetch_currencies(self, params={}):
# sandbox/testnet only supports future markets
apiBackup = self.safe_value(self.urls, 'apiBackup')
if apiBackup is not None:
return None
response = await self.publicSpotGetCurrencies(params)
#
# {
# "currency": "BCN",
# "delisted": False,
# "withdraw_disabled": True,
# "withdraw_delayed": False,
# "deposit_disabled": True,
# "trade_disabled": False
# }
#
result = {}
# TODO: remove magic constants
amountPrecision = self.parse_number('1e-6')
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
currencyIdLower = self.safe_string_lower(entry, 'currency')
code = self.safe_currency_code(currencyId)
delisted = self.safe_value(entry, 'delisted')
withdrawDisabled = self.safe_value(entry, 'withdraw_disabled', False)
depositDisabled = self.safe_value(entry, 'deposit_disabled', False)
tradeDisabled = self.safe_value(entry, 'trade_disabled', False)
withdrawEnabled = not withdrawDisabled
depositEnabled = not depositDisabled
tradeEnabled = not tradeDisabled
listed = not delisted
active = listed and tradeEnabled and withdrawEnabled and depositEnabled
result[code] = {
'id': currencyId,
'lowerCaseId': currencyIdLower,
'name': None,
'code': code,
'precision': amountPrecision,
'info': entry,
'active': active,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': None,
'fees': [],
'limits': self.limits,
}
return result
async def fetch_funding_rate(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
if not market['swap']:
raise BadSymbol(self.id + ' fetchFundingRate() supports swap contracts only')
request, query = self.prepare_request(market, None, params)
response = await self.publicFuturesGetSettleContractsContract(self.extend(request, query))
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
return self.parse_funding_rate(response)
async def fetch_funding_rates(self, symbols=None, params={}):
await self.load_markets()
request, query = self.prepare_request(None, 'swap', params)
response = await self.publicFuturesGetSettleContracts(self.extend(request, query))
#
# [
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
# ]
#
result = self.parse_funding_rates(response)
return self.filter_by_array(result, 'symbol', symbols)
def parse_funding_rate(self, contract, market=None):
#
# {
# "name": "BTC_USDT",
# "type": "direct",
# "quanto_multiplier": "0.0001",
# "ref_discount_rate": "0",
# "order_price_deviate": "0.5",
# "maintenance_rate": "0.005",
# "mark_type": "index",
# "last_price": "38026",
# "mark_price": "37985.6",
# "index_price": "37954.92",
# "funding_rate_indicative": "0.000219",
# "mark_price_round": "0.01",
# "funding_offset": 0,
# "in_delisting": False,
# "risk_limit_base": "1000000",
# "interest_rate": "0.0003",
# "order_price_round": "0.1",
# "order_size_min": 1,
# "ref_rebate_rate": "0.2",
# "funding_interval": 28800,
# "risk_limit_step": "1000000",
# "leverage_min": "1",
# "leverage_max": "100",
# "risk_limit_max": "8000000",
# "maker_fee_rate": "-0.00025",
# "taker_fee_rate": "0.00075",
# "funding_rate": "0.002053",
# "order_size_max": 1000000,
# "funding_next_apply": 1610035200,
# "short_users": 977,
# "config_change_time": 1609899548,
# "trade_size": 28530850594,
# "position_size": 5223816,
# "long_users": 455,
# "funding_impact_value": "60000",
# "orders_limit": 50,
# "trade_id": 10851092,
# "orderbook_id": 2129638396
# }
#
marketId = self.safe_string(contract, 'name')
symbol = self.safe_symbol(marketId, market)
markPrice = self.safe_number(contract, 'mark_price')
indexPrice = self.safe_number(contract, 'index_price')
interestRate = self.safe_number(contract, 'interest_rate')
fundingRate = self.safe_number(contract, 'funding_rate')
fundingTime = self.safe_integer(contract, 'funding_next_apply') * 1000
fundingRateIndicative = self.safe_number(contract, 'funding_rate_indicative')
return {
'info': contract,
'symbol': symbol,
'markPrice': markPrice,
'indexPrice': indexPrice,
'interestRate': interestRate,
'estimatedSettlePrice': None,
'timestamp': None,
'datetime': None,
'fundingRate': fundingRate,
'fundingTimestamp': fundingTime,
'fundingDatetime': self.iso8601(fundingTime),
'nextFundingRate': fundingRateIndicative,
'nextFundingTimestamp': None,
'nextFundingDatetime': None,
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
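    # Comment-only usage sketch: fetching the current funding rate of a
    # perpetual swap; the symbol format follows parse_contract_market above:
    #
    #     rate = await exchange.fetch_funding_rate('BTC/USDT:USDT')
    #     print(rate['fundingRate'], rate['fundingDatetime'])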
async def fetch_network_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privateWalletGetDepositAddress(self.extend(request, params))
addresses = self.safe_value(response, 'multichain_addresses')
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
result = {}
for i in range(0, len(addresses)):
entry = addresses[i]
#
# {
# "chain": "ETH",
# "address": "0x359a697945E79C7e17b634675BD73B33324E9408",
# "payment_id": "",
# "payment_name": "",
# "obtain_failed": "0"
# }
#
obtainFailed = self.safe_integer(entry, 'obtain_failed')
if obtainFailed:
continue
network = self.safe_string(entry, 'chain')
address = self.safe_string(entry, 'address')
tag = self.safe_string(entry, 'payment_id')
tagLength = len(tag)
tag = tag if tagLength else None
result[network] = {
'info': entry,
'code': code,
'address': address,
'tag': tag,
}
return result
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privateWalletGetDepositAddress(self.extend(request, params))
#
# {
# "currency": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d 391331007",
# "multichain_addresses": [
# {
# "chain": "XRP",
# "address": "rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d",
# "payment_id": "391331007",
# "payment_name": "Tag",
# "obtain_failed": 0
# }
# ]
# }
#
currencyId = self.safe_string(response, 'currency')
code = self.safe_currency_code(currencyId)
addressField = self.safe_string(response, 'address')
tag = None
address = None
if addressField.find(' ') >= 0:
splitted = addressField.split(' ')
address = splitted[0]
tag = splitted[1]
else:
address = addressField
return {
'info': response,
'code': code,
'address': address,
'tag': tag,
'network': None,
}
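    # Comment-only usage sketch: fetch_deposit_address returns one unified
    # address, while fetch_network_deposit_address returns one entry per chain:
    #
    #     unified = await exchange.fetch_deposit_address('USDT')
    #     by_chain = await exchange.fetch_network_deposit_address('USDT')
    #     # by_chain is keyed by chain id, the exact chains vary per currency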
async def fetch_trading_fee(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currency_pair': market['id'],
}
response = await self.privateWalletGetFee(self.extend(request, params))
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
return self.parse_trading_fee(response, market)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateWalletGetFee(params)
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
return self.parse_trading_fees(response)
def parse_trading_fees(self, response):
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
market = self.market(symbol)
result[symbol] = self.parse_trading_fee(response, market)
return result
def parse_trading_fee(self, info, market=None):
#
# {
# "user_id": 1486602,
# "taker_fee": "0.002",
# "maker_fee": "0.002",
# "gt_discount": True,
# "gt_taker_fee": "0.0015",
# "gt_maker_fee": "0.0015",
# "loan_fee": "0.18",
# "point_type": "0",
# "futures_taker_fee": "0.0005",
# "futures_maker_fee": "0"
# }
#
contract = self.safe_value(market, 'contract')
takerKey = 'futures_taker_fee' if contract else 'taker_fee'
makerKey = 'futures_maker_fee' if contract else 'maker_fee'
return {
'info': info,
'symbol': self.safe_string(market, 'symbol'),
'maker': self.safe_number(info, makerKey),
'taker': self.safe_number(info, takerKey),
}
async def fetch_transaction_fees(self, codes=None, params={}):
await self.load_markets()
response = await self.privateWalletGetWithdrawStatus(params)
#
# {
# "currency": "MTN",
# "name": "Medicalchain",
# "name_cn": "Medicalchain",
# "deposit": "0",
# "withdraw_percent": "0%",
# "withdraw_fix": "900",
# "withdraw_day_limit": "500000",
# "withdraw_day_limit_remain": "500000",
# "withdraw_amount_mini": "900.1",
# "withdraw_eachtime_limit": "90000000000",
# "withdraw_fix_on_chains": {
# "ETH": "900"
# }
# }
#
withdrawFees = {}
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
withdrawFees[code] = {}
withdrawFix = self.safe_value(entry, 'withdraw_fix_on_chains')
if withdrawFix is None:
withdrawFix = {}
withdrawFix[code] = self.safe_number(entry, 'withdraw_fix')
keys = list(withdrawFix.keys())
for i in range(0, len(keys)):
key = keys[i]
withdrawFees[code][key] = self.parse_number(withdrawFix[key])
return {
'info': response,
'withdraw': withdrawFees,
'deposit': {},
}
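    # Shape of the structure returned above (illustrative values only):
    #     {
    #         'info': [...],
    #         'withdraw': {'MTN': {'ETH': 900.0}},  # keyed by chain when
    #             # withdraw_fix_on_chains is present, otherwise by currency code
    #         'deposit': {},
    #     }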
async def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
# defaultType = 'future'
market = None
if symbol is not None:
market = self.market(symbol)
type, query = self.handle_market_type_and_params('fetchFundingHistory', market, params)
request, requestParams = self.prepare_request(market, type, query)
request['type'] = 'fund' # 'dnw' 'pnl' 'fee' 'refr' 'fund' 'point_dnw' 'point_fee' 'point_refr'
if since is not None:
            request['from'] = int(since / 1000)
if limit is not None:
request['limit'] = limit
method = self.get_supported_mapping(type, {
'swap': 'privateFuturesGetSettleAccountBook',
'future': 'privateDeliveryGetSettleAccountBook',
})
response = await getattr(self, method)(self.extend(request, requestParams))
#
# [
# {
# "time": 1646899200,
# "change": "-0.027722",
# "balance": "11.653120591841",
# "text": "XRP_USDT",
# "type": "fund"
# },
# ...
# ]
#
return self.parse_funding_histories(response, symbol, since, limit)
def parse_funding_histories(self, response, symbol, since, limit):
result = []
for i in range(0, len(response)):
entry = response[i]
funding = self.parse_funding_history(entry)
result.append(funding)
sorted = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
def parse_funding_history(self, info, market=None):
#
# {
# "time": 1646899200,
# "change": "-0.027722",
# "balance": "11.653120591841",
# "text": "XRP_USDT",
# "type": "fund"
# }
#
timestamp = self.safe_timestamp(info, 'time')
marketId = self.safe_string(info, 'text')
market = self.safe_market(marketId, market)
return {
'info': info,
'symbol': self.safe_string(market, 'symbol'),
'code': self.safe_string(market, 'settle'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': None,
'amount': self.safe_number(info, 'change'),
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
#
# request = {
# 'currency_pair': market['id'],
# 'interval': '0', # depth, 0 means no aggregation is applied, default to 0
# 'limit': limit, # maximum number of order depth data in asks or bids
# 'with_id': True, # return order book ID
# }
#
request, query = self.prepare_request(market, None, params)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetOrderBook',
'margin': 'publicSpotGetOrderBook',
'swap': 'publicFuturesGetSettleOrderBook',
'future': 'publicDeliveryGetSettleOrderBook',
})
if limit is not None:
request['limit'] = limit # default 10, max 100
request['with_id'] = True
response = await getattr(self, method)(self.extend(request, query))
#
# SPOT
#
# {
        #     "id": 6358770031,
# "current": 1634345973275,
# "update": 1634345973271,
# "asks": [
# ["2.2241","12449.827"],
# ["2.2242","200"],
# ["2.2244","826.931"],
# ["2.2248","3876.107"],
# ["2.225","2377.252"],
# ["2.22509","439.484"],
# ["2.2251","1489.313"],
# ["2.2253","714.582"],
# ["2.2254","1349.784"],
# ["2.2256","234.701"]],
# "bids": [
# ["2.2236","32.465"],
# ["2.2232","243.983"],
# ["2.2231","32.207"],
# ["2.223","449.827"],
# ["2.2228","7.918"],
# ["2.2227","12703.482"],
# ["2.2226","143.033"],
# ["2.2225","143.027"],
# ["2.2224","1369.352"],
# ["2.2223","756.063"]
# ]
# }
#
# Perpetual Swap
#
# {
        #     "id": 6358770031,
# "current": 1634350208.745,
# "asks": [
# {"s": 24909, "p": "61264.8"},
# {"s": 81, "p": "61266.6"},
# {"s": 2000, "p": "61267.6"},
# {"s": 490, "p": "61270.2"},
# {"s": 12, "p": "61270.4"},
# {"s": 11782, "p": "61273.2"},
# {"s": 14666, "p": "61273.3"},
# {"s": 22541, "p": "61273.4"},
# {"s": 33, "p": "61273.6"},
# {"s": 11980, "p": "61274.5"}
# ],
# "bids": [
# {"s": 41844, "p": "61264.7"},
# {"s": 13783, "p": "61263.3"},
# {"s": 1143, "p": "61259.8"},
# {"s": 81, "p": "61258.7"},
# {"s": 2471, "p": "61257.8"},
# {"s": 2471, "p": "61257.7"},
# {"s": 2471, "p": "61256.5"},
# {"s": 3, "p": "61254.2"},
# {"s": 114, "p": "61252.4"},
# {"s": 14372, "p": "61248.6"}
# ],
# "update": 1634350208.724
# }
#
timestamp = self.safe_integer(response, 'current')
if not market['spot']:
timestamp = timestamp * 1000
priceKey = 0 if market['spot'] else 'p'
amountKey = 1 if market['spot'] else 's'
nonce = self.safe_integer(response, 'id')
result = self.parse_order_book(response, symbol, timestamp, 'bids', 'asks', priceKey, amountKey)
result['nonce'] = nonce
return result
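    # Comment-only usage sketch: spot books arrive as [price, amount] arrays
    # and contract books as {'p': price, 's': size} dicts; both are normalized
    # through the priceKey/amountKey selection above:
    #
    #     book = await exchange.fetch_order_book('BTC/USDT', limit=10)
    #     best_bid, best_ask = book['bids'][0][0], book['asks'][0][0]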
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request, query = self.prepare_request(market, None, params)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTickers',
'margin': 'publicSpotGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'future': 'publicDeliveryGetSettleTickers',
})
response = await getattr(self, method)(self.extend(request, query))
ticker = self.safe_value(response, 0)
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
#
# SPOT
#
# {
# "currency_pair": "KFC_USDT",
# "last": "7.255",
# "lowest_ask": "7.298",
# "highest_bid": "7.218",
# "change_percentage": "-1.18",
# "base_volume": "1219.053687865",
# "quote_volume": "8807.40299875455",
# "high_24h": "7.262",
# "low_24h": "7.095"
# }
#
# LINEAR/DELIVERY
#
# {
# "contract": "BTC_USDT",
# "last": "6432",
# "low_24h": "6278",
# "high_24h": "6790",
# "change_percentage": "4.43",
# "total_size": "32323904",
# "volume_24h": "184040233284",
# "volume_24h_btc": "28613220",
# "volume_24h_usd": "184040233284",
# "volume_24h_base": "28613220",
# "volume_24h_quote": "184040233284",
# "volume_24h_settle": "28613220",
# "mark_price": "6534",
# "funding_rate": "0.0001",
# "funding_rate_indicative": "0.0001",
# "index_price": "6531"
# }
#
marketId = self.safe_string_2(ticker, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
last = self.safe_string(ticker, 'last')
ask = self.safe_string(ticker, 'lowest_ask')
bid = self.safe_string(ticker, 'highest_bid')
high = self.safe_string(ticker, 'high_24h')
low = self.safe_string(ticker, 'low_24h')
baseVolume = self.safe_string_2(ticker, 'base_volume', 'volume_24h_base')
quoteVolume = self.safe_string_2(ticker, 'quote_volume', 'volume_24h_quote')
percentage = self.safe_string(ticker, 'change_percentage')
return self.safe_ticker({
'symbol': symbol,
'timestamp': None,
'datetime': None,
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
type, query = self.handle_market_type_and_params('fetchTickers', None, params)
request, requestParams = self.prepare_request(None, type, query)
method = self.get_supported_mapping(type, {
'spot': 'publicSpotGetTickers',
'margin': 'publicSpotGetTickers',
'swap': 'publicFuturesGetSettleTickers',
'future': 'publicDeliveryGetSettleTickers',
})
response = await getattr(self, method)(self.extend(request, requestParams))
return self.parse_tickers(response, symbols)
def fetch_balance_helper(self, entry):
account = self.account()
account['used'] = self.safe_string_2(entry, 'freeze', 'locked')
account['free'] = self.safe_string(entry, 'available')
account['total'] = self.safe_string(entry, 'total')
return account
async def fetch_balance(self, params={}):
await self.load_markets()
symbol = self.safe_string(params, 'symbol')
params = self.omit(params, 'symbol')
type, query = self.handle_market_type_and_params('fetchBalance', None, params)
request, requestParams = self.prepare_request(None, type, query)
marginMode, requestQuery = self.get_margin_mode(False, requestParams)
if symbol is not None:
market = self.market(symbol)
request['currency_pair'] = market['id']
method = self.get_supported_mapping(type, {
'spot': self.get_supported_mapping(marginMode, {
'spot': 'privateSpotGetAccounts',
'margin': 'privateMarginGetAccounts',
'cross_margin': 'privateMarginGetCrossAccounts',
}),
'funding': 'privateMarginGetFundingAccounts',
'swap': 'privateFuturesGetSettleAccounts',
'future': 'privateDeliveryGetSettleAccounts',
})
response = await getattr(self, method)(self.extend(request, requestQuery))
contract = (type == 'swap' or type == 'future')
if contract:
response = [response]
#
# Spot / margin funding
#
# [
# {
# "currency": "DBC",
# "available": "0",
# "locked": "0"
# "lent": "0", # margin funding only
# "total_lent": "0" # margin funding only
# },
# ...
# ]
#
# Margin
#
# [
# {
# "currency_pair": "DOGE_USDT",
# "locked": False,
# "risk": "9999.99",
# "base": {
# "currency": "DOGE",
# "available": "0",
# "locked": "0",
# "borrowed": "0",
# "interest": "0"
# },
# "quote": {
# "currency": "USDT",
# "available": "0.73402",
# "locked": "0",
# "borrowed": "0",
# "interest": "0"
# }
# },
# ...
# ]
#
# Cross margin
#
# {
# "user_id": 10406147,
# "locked": False,
# "balances": {
# "USDT": {
# "available": "1",
# "freeze": "0",
# "borrowed": "0",
# "interest": "0"
# }
# },
# "total": "1",
# "borrowed": "0",
# "interest": "0",
# "risk": "9999.99"
# }
#
# Perpetual Swap
#
# {
# order_margin: "0",
# point: "0",
# bonus: "0",
# history: {
# dnw: "2.1321",
# pnl: "11.5351",
# refr: "0",
# point_fee: "0",
# fund: "-0.32340576684",
# bonus_dnw: "0",
# point_refr: "0",
# bonus_offset: "0",
# fee: "-0.20132775",
# point_dnw: "0",
# },
# unrealised_pnl: "13.315100000006",
# total: "12.51345151332",
# available: "0",
# in_dual_mode: False,
# currency: "USDT",
# position_margin: "12.51345151332",
# user: "6333333",
# }
#
# Delivery Future
#
# {
# order_margin: "0",
# point: "0",
# history: {
# dnw: "1",
# pnl: "0",
# refr: "0",
# point_fee: "0",
# point_dnw: "0",
# settle: "0",
# settle_fee: "0",
# point_refr: "0",
# fee: "0",
# },
# unrealised_pnl: "0",
# total: "1",
# available: "1",
# currency: "USDT",
# position_margin: "0",
# user: "6333333",
# }
#
result = {
'info': response,
}
crossMargin = marginMode == 'cross_margin'
margin = marginMode == 'margin'
data = response
if 'balances' in data: # True for cross_margin
flatBalances = []
balances = self.safe_value(data, 'balances', [])
# inject currency and create an artificial balance object
# so it can follow the existent flow
keys = list(balances.keys())
for i in range(0, len(keys)):
currencyId = keys[i]
content = balances[currencyId]
content['currency'] = currencyId
flatBalances.append(content)
data = flatBalances
for i in range(0, len(data)):
entry = data[i]
if margin and not crossMargin:
marketId = self.safe_string(entry, 'currency_pair')
symbol = self.safe_symbol(marketId, None, '_')
base = self.safe_value(entry, 'base', {})
quote = self.safe_value(entry, 'quote', {})
baseCode = self.safe_currency_code(self.safe_string(base, 'currency', {}))
quoteCode = self.safe_currency_code(self.safe_string(quote, 'currency', {}))
subResult = {}
subResult[baseCode] = self.fetch_balance_helper(base)
subResult[quoteCode] = self.fetch_balance_helper(quote)
result[symbol] = self.safe_balance(subResult)
else:
code = self.safe_currency_code(self.safe_string(entry, 'currency', {}))
result[code] = self.fetch_balance_helper(entry)
return result if (margin and not crossMargin) else self.safe_balance(result)
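    # Comment-only usage sketch: the account queried depends on the resolved
    # type and marginMode:
    #
    #     spot = await exchange.fetch_balance()
    #     swap = await exchange.fetch_balance({'type': 'swap'})
    #     cross = await exchange.fetch_balance({'marginMode': 'cross'})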
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
price = self.safe_string(params, 'price')
request = {}
request, params = self.prepare_request(market, None, params)
request['interval'] = self.timeframes[timeframe]
method = 'publicSpotGetCandlesticks'
if market['contract']:
maxLimit = 1999
limit = maxLimit if (limit is None) else min(limit, maxLimit)
if market['future']:
method = 'publicDeliveryGetSettleCandlesticks'
elif market['swap']:
method = 'publicFuturesGetSettleCandlesticks'
isMark = (price == 'mark')
isIndex = (price == 'index')
if isMark or isIndex:
request['contract'] = price + '_' + market['id']
params = self.omit(params, 'price')
else:
maxLimit = 1000
limit = maxLimit if (limit is None) else min(limit, maxLimit)
request['limit'] = limit
if since is not None:
duration = self.parse_timeframe(timeframe)
request['from'] = int(since / 1000)
toTimestamp = self.sum(request['from'], limit * duration - 1)
currentTimestamp = self.seconds()
request['to'] = min(toTimestamp, currentTimestamp)
response = await getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
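    # Comment-only usage sketch: regular, mark-price and index-price candles:
    #
    #     ohlcv = await exchange.fetch_ohlcv('BTC/USDT', '1h', limit=100)
    #     mark = await exchange.fetch_ohlcv('BTC/USDT:USDT', '1h', params={'price': 'mark'})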
async def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'mark',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
async def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
if not market['swap']:
raise BadSymbol(self.id + ' fetchFundingRateHistory() supports swap contracts only')
request, query = self.prepare_request(market, None, params)
if limit is not None:
request['limit'] = limit
method = 'publicFuturesGetSettleFundingRate'
response = await getattr(self, method)(self.extend(request, query))
#
# {
# "r": "0.00063521",
# "t": "1621267200000",
# }
#
rates = []
for i in range(0, len(response)):
entry = response[i]
timestamp = self.safe_timestamp(entry, 't')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'r'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, market['symbol'], since, limit)
async def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'index',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_ohlcv(self, ohlcv, market=None):
#
# Spot market candles
#
# [
# "1626163200", # Unix timestamp in seconds
# "346711.933138181617", # Trading volume
# "33165.23", # Close price
# "33260", # Highest price
# "33117.6", # Lowest price
# "33184.47" # Open price
# ]
#
# Mark and Index price candles
#
# {
# "t":1632873600, # Unix timestamp in seconds
# "o": "41025", # Open price
# "h": "41882.17", # Highest price
# "c": "41776.92", # Close price
# "l": "40783.94" # Lowest price
# }
#
if isinstance(ohlcv, list):
return [
self.safe_timestamp(ohlcv, 0), # unix timestamp in seconds
self.safe_number(ohlcv, 5), # open price
self.safe_number(ohlcv, 3), # highest price
self.safe_number(ohlcv, 4), # lowest price
self.safe_number(ohlcv, 2), # close price
self.safe_number(ohlcv, 1), # trading volume
]
else:
# Mark and Index price candles
return [
self.safe_timestamp(ohlcv, 't'), # unix timestamp in seconds
self.safe_number(ohlcv, 'o'), # open price
self.safe_number(ohlcv, 'h'), # highest price
self.safe_number(ohlcv, 'l'), # lowest price
self.safe_number(ohlcv, 'c'), # close price
self.safe_number(ohlcv, 'v'), # trading volume, None for mark or index price
]
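    # Note the reordering of spot candles above: the raw array is
    # [timestamp, volume, close, high, low, open] and is mapped onto the
    # unified [timestamp, open, high, low, close, volume] layout, e.g.:
    #     ["1626163200", "346711.93", "33165.23", "33260", "33117.6", "33184.47"]
    #     -> [1626163200000, 33184.47, 33260.0, 33117.6, 33165.23, 346711.93]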
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
#
# spot
#
# request = {
# 'currency_pair': market['id'],
# 'limit': limit, # maximum number of records to be returned in a single list
        #     'last_id': 'id',  # specify the list starting point using the id of the last record in previous list-query results
# 'reverse': False, # True to retrieve records where id is smaller than the specified last_id, False to retrieve records where id is larger than the specified last_id
# }
#
# swap, future
#
# request = {
# 'settle': market['settleId'],
# 'contract': market['id'],
# 'limit': limit, # maximum number of records to be returned in a single list
        #     'last_id': 'id',  # specify the list starting point using the id of the last record in previous list-query results
        #     'from': int(since / 1000),  # starting time in seconds, if not specified, to and limit will be used to limit response items
# 'to': self.seconds(), # end time in seconds, default to current time
# }
#
request, query = self.prepare_request(market, None, params)
method = self.get_supported_mapping(market['type'], {
'spot': 'publicSpotGetTrades',
'margin': 'publicSpotGetTrades',
'swap': 'publicFuturesGetSettleTrades',
'future': 'publicDeliveryGetSettleTrades',
})
if limit is not None:
request['limit'] = limit # default 100, max 1000
if since is not None and (market['contract']):
request['from'] = int(since / 1000)
response = await getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# id: "1852958144",
# create_time: "1634673259",
# create_time_ms: "1634673259378.105000",
# currency_pair: "ADA_USDT",
# side: "sell",
# amount: "307.078",
# price: "2.104",
# }
# ]
#
# perpetual swap
#
# [
# {
# size: "2",
# id: "2522911",
# create_time_ms: "1634673380.182",
# create_time: "1634673380.182",
# contract: "ADA_USDT",
# price: "2.10486",
# }
# ]
#
return self.parse_trades(response, market, since, limit)
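    # Comment-only usage sketch: public trades for spot and contract markets:
    #
    #     trades = await exchange.fetch_trades('BTC/USDT', limit=50)
    #     one_hour_ago = exchange.milliseconds() - 3600000
    #     swap_trades = await exchange.fetch_trades('BTC/USDT:USDT', since=one_hour_ago)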
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
type = None
marginMode = None
request = {}
market = self.market(symbol) if (symbol is not None) else None
till = self.safe_number(params, 'till')
params = self.omit(params, 'till')
type, params = self.handle_market_type_and_params('fetchMyTrades', market, params)
contract = (type == 'swap') or (type == 'future')
if contract:
request, params = self.prepare_request(market, type, params)
else:
if market is not None:
request['currency_pair'] = market['id'] # Should always be set for non-stop
marginMode, params = self.get_margin_mode(False, params)
request['account'] = marginMode
if limit is not None:
request['limit'] = limit # default 100, max 1000
if since is not None:
request['from'] = int(since / 1000)
if till is not None:
request['to'] = int(till / 1000)
method = self.get_supported_mapping(type, {
'spot': 'privateSpotGetMyTrades',
'margin': 'privateSpotGetMyTrades',
'swap': 'privateFuturesGetSettleMyTrades',
'future': 'privateDeliveryGetSettleMyTrades',
})
response = await getattr(self, method)(self.extend(request, params))
#
# spot
#
# [
# {
# "id": "2876130500",
# "create_time": "1645464610",
# "create_time_ms": "1645464610777.399200",
# "currency_pair": "DOGE_USDT",
# "side": "sell",
# "role": "taker",
# "amount": "10.97",
# "price": "0.137384",
# "order_id": "125924049993",
# "fee": "0.00301420496",
# "fee_currency": "USDT",
# "point_fee": "0",
# "gt_fee": "0"
# }
# ]
#
# perpetual swap
#
# [
# {
# "size": -5,
# "order_id": "130264979823",
# "id": 26884791,
# "role": "taker",
# "create_time": 1645465199.5472,
# "contract": "DOGE_USDT",
# "price": "0.136888"
# }
# ]
#
# future
#
# [
# {
# "id": 121234231,
# "create_time": 1514764800.123,
# "contract": "BTC_USDT",
# "order_id": "21893289839",
# "size": 100,
# "price": "100.123",
# "role": "taker"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# public
#
# {
# "id": "1334253759",
# "create_time": "1626342738",
# "create_time_ms": "1626342738331.497000",
# "currency_pair": "BTC_USDT",
# "side": "sell",
# "amount": "0.0022",
# "price": "32452.16"
# }
#
# public ws
#
# {
# id: 221994511,
# time: 1580311438.618647,
# price: '9309',
# amount: '0.0019',
# type: 'sell'
# }
#
# spot rest
#
# {
# "id": "2876130500",
# "create_time": "1645464610",
# "create_time_ms": "1645464610777.399200",
# "currency_pair": "DOGE_USDT",
# "side": "sell",
# "role": "taker",
# "amount": "10.97",
# "price": "0.137384",
# "order_id": "125924049993",
# "fee": "0.00301420496",
# "fee_currency": "USDT",
        #         "point_fee": "0",
        #         "gt_fee": "0"
# }
#
# perpetual swap rest
#
# {
# "size": -5,
# "order_id": "130264979823",
# "id": 26884791,
# "role": "taker",
# "create_time": 1645465199.5472,
# "contract": "DOGE_USDT",
# "price": "0.136888"
# }
#
# future rest
#
# {
# "id": 121234231,
# "create_time": 1514764800.123,
# "contract": "BTC_USDT",
# "order_id": "21893289839",
# "size": 100,
# "price": "100.123",
# "role": "taker"
# }
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_timestamp_2(trade, 'time', 'create_time')
timestamp = self.safe_integer(trade, 'create_time_ms', timestamp)
marketId = self.safe_string_2(trade, 'currency_pair', 'contract')
symbol = self.safe_symbol(marketId, market)
amountString = self.safe_string_2(trade, 'amount', 'size')
priceString = self.safe_string(trade, 'price')
contractSide = 'sell' if Precise.string_lt(amountString, '0') else 'buy'
amountString = Precise.string_abs(amountString)
side = self.safe_string_2(trade, 'side', 'type', contractSide)
orderId = self.safe_string(trade, 'order_id')
gtFee = self.safe_string(trade, 'gt_fee')
feeCurrency = None
feeCostString = None
if gtFee == '0':
feeCurrency = self.safe_string(trade, 'fee_currency')
feeCostString = self.safe_string(trade, 'fee')
else:
feeCurrency = 'GT'
feeCostString = gtFee
fee = {
'cost': feeCostString,
'currency': feeCurrency,
}
takerOrMaker = self.safe_string(trade, 'role')
return self.safe_trade({
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, 30 * 24 * 60 * 60)
response = await self.privateWalletGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit
if since is not None:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, 30 * 24 * 60 * 60)
response = await self.privateWalletGetWithdrawals(self.extend(request, params))
return self.parse_transactions(response, currency)
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'address': address,
'amount': self.currency_to_precision(code, amount),
}
if tag is not None:
request['memo'] = tag
networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper(params, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
request['chain'] = network
params = self.omit(params, 'network')
response = await self.privateWithdrawalsPost(self.extend(request, params))
#
# {
# "id": "w13389675",
# "currency": "USDT",
# "amount": "50",
# "address": "TUu2rLFrmzUodiWfYki7QCNtv1akL682p1",
# "memo": null
# }
#
return self.parse_transaction(response, currency)
def parse_transaction_status(self, status):
statuses = {
'PEND': 'pending',
'REQUEST': 'pending',
'DMOVE': 'pending',
'CANCEL': 'failed',
'DONE': 'ok',
'BCODE': 'ok', # GateCode withdrawal
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'd': 'deposit',
'w': 'withdrawal',
}
return self.safe_string(types, type, type)
def parse_transaction(self, transaction, currency=None):
#
# deposits
#
# {
# "id": "d33361395",
# "currency": "USDT_TRX",
# "address": "TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z",
# "amount": "100",
# "txid": "ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0",
# "timestamp": "1626345819",
# "status": "DONE",
# "memo": ""
# }
#
# withdraw
#
# {
# "id": "w13389675",
# "currency": "USDT",
# "amount": "50",
# "address": "TUu2rLFrmzUodiWfYki7QCNtv1akL682p1",
# "memo": null
# }
#
id = self.safe_string(transaction, 'id')
type = None
amount = self.safe_string(transaction, 'amount')
if id[0] == 'b':
# GateCode handling
type = 'deposit' if Precise.string_gt(amount, '0') else 'withdrawal'
amount = Precise.string_abs(amount)
elif id is not None:
type = self.parse_transaction_type(id[0])
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
txid = self.safe_string(transaction, 'txid')
rawStatus = self.safe_string(transaction, 'status')
status = self.parse_transaction_status(rawStatus)
address = self.safe_string(transaction, 'address')
fee = self.safe_number(transaction, 'fee')
tag = self.safe_string(transaction, 'memo')
if tag == '':
tag = None
timestamp = self.safe_timestamp(transaction, 'timestamp')
return {
'info': transaction,
'id': id,
'txid': txid,
'currency': code,
'amount': self.parse_number(amount),
'network': None,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'status': status,
'type': type,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'updated': None,
'fee': fee,
}
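    # Illustrative sketch of the id-based typing above: 'd33361395' parses as
    # a deposit and 'w13389675' as a withdrawal via parse_transaction_type,
    # while a GateCode id such as 'b123' (hypothetical) is typed by the sign
    # of its amount, '50' -> deposit, '-50' -> withdrawal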
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
contract = market['contract']
stopPrice = self.safe_number(params, 'stopPrice')
methodTail = 'Orders'
reduceOnly = self.safe_value_2(params, 'reduce_only', 'reduceOnly')
defaultTimeInForce = self.safe_value_2(params, 'tif', 'time_in_force', 'gtc')
timeInForce = self.safe_value(params, 'timeInForce', defaultTimeInForce)
postOnly = False
type, postOnly, timeInForce, params = self.is_post_only(type, timeInForce, None, params)
params = self.omit(params, ['stopPrice', 'reduce_only', 'reduceOnly', 'tif', 'time_in_force', 'timeInForce'])
if postOnly:
timeInForce = 'poc'
isLimitOrder = (type == 'limit')
isMarketOrder = (type == 'market')
if isLimitOrder and price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for ' + type + ' orders')
if contract:
amountToPrecision = self.amount_to_precision(symbol, amount)
signedAmount = Precise.string_neg(amountToPrecision) if (side == 'sell') else amountToPrecision
amount = int(signedAmount)
if isMarketOrder:
timeInForce = 'ioc'
price = 0
elif not isLimitOrder:
# Gateio doesn't have market orders for spot
raise InvalidOrder(self.id + ' createOrder() does not support ' + type + ' orders for ' + market['type'] + ' markets')
request = None
trigger = self.safe_value(params, 'trigger')
if stopPrice is None and trigger is None:
if contract:
request = {
                    'contract': market['id'],
                    'size': amount,
                    'price': self.price_to_precision(symbol, price),
                    'settle': market['settleId'],
                }
if reduceOnly is not None:
request['reduce_only'] = reduceOnly
if timeInForce is not None:
request['tif'] = timeInForce
else:
marginMode = None
marginMode, params = self.get_margin_mode(False, params)
request = {
                    'currency_pair': market['id'],
                    'type': type,
                    'account': marginMode,
                    'side': side,
'amount': self.amount_to_precision(symbol, amount),
'price': self.price_to_precision(symbol, price),
}
if timeInForce is not None:
request['time_in_force'] = timeInForce
clientOrderId = self.safe_string_2(params, 'text', 'clientOrderId')
if clientOrderId is not None:
if len(clientOrderId) > 28:
raise BadRequest(self.id + ' createOrder() clientOrderId or text param must be up to 28 characters')
params = self.omit(params, ['text', 'clientOrderId'])
if clientOrderId[0] != 't':
clientOrderId = 't-' + clientOrderId
request['text'] = clientOrderId
else:
if contract:
rule = 1 if (side == 'buy') else 2
request = {
'initial': {
'contract': market['id'],
                        'size': amount,
                        'price': self.price_to_precision(symbol, price),
                    },
'trigger': {
                        'price': self.price_to_precision(symbol, stopPrice),
                        'rule': rule,
                    },
'settle': market['settleId'],
}
expiration = self.safe_integer(params, 'expiration')
if expiration is not None:
request['trigger']['expiration'] = expiration
params = self.omit(params, 'expiration')
if reduceOnly is not None:
request['initial']['reduce_only'] = reduceOnly
if timeInForce is not None:
request['initial']['tif'] = timeInForce
else:
options = self.safe_value(self.options, 'createOrder', {})
marginMode = None
marginMode, params = self.get_margin_mode(True, params)
defaultExpiration = self.safe_integer(options, 'expiration')
expiration = self.safe_integer(params, 'expiration', defaultExpiration)
rule = '>=' if (side == 'buy') else '<='
triggerPrice = self.safe_value(trigger, 'price', stopPrice)
request = {
'trigger': {
'price': self.price_to_precision(symbol, triggerPrice),
                        'rule': rule,
                        'expiration': expiration,
                    },
'put': {
'type': type,
'side': side,
'price': self.price_to_precision(symbol, price),
'amount': self.amount_to_precision(symbol, amount),
'account': marginMode,
                        'time_in_force': timeInForce,
                    },
'market': market['id'],
}
methodTail = 'PriceOrders'
method = self.get_supported_mapping(market['type'], {
'spot': 'privateSpotPost' + methodTail,
'margin': 'privateSpotPost' + methodTail,
'swap': 'privateFuturesPostSettle' + methodTail,
'future': 'privateDeliveryPostSettle' + methodTail,
})
response = await getattr(self, method)(self.deep_extend(request, params))
return self.parse_order(response, market)
def parse_order_status(self, status):
statuses = {
'_new': 'open',
'filled': 'closed',
'cancelled': 'canceled',
'liquidated': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
put = self.safe_value_2(order, 'put', 'initial')
trigger = self.safe_value(order, 'trigger')
contract = self.safe_string(put, 'contract')
type = self.safe_string(put, 'type')
timeInForce = self.safe_string_upper_2(put, 'time_in_force', 'tif')
amount = self.safe_string_2(put, 'amount', 'size')
side = self.safe_string(put, 'side')
price = self.safe_string(put, 'price')
contract = self.safe_string(order, 'contract', contract)
type = self.safe_string(order, 'type', type)
timeInForce = self.safe_string_upper_2(order, 'time_in_force', 'tif', timeInForce)
if timeInForce == 'POC':
timeInForce = 'PO'
postOnly = (timeInForce == 'PO')
amount = self.safe_string_2(order, 'amount', 'size', amount)
side = self.safe_string(order, 'side', side)
price = self.safe_string(order, 'price', price)
remaining = self.safe_string(order, 'left')
filled = Precise.string_sub(amount, remaining)
cost = self.safe_string(order, 'filled_total')
rawStatus = None
average = None
if put:
remaining = amount
filled = '0'
cost = '0'
if contract:
isMarketOrder = Precise.string_equals(price, '0') and (timeInForce == 'IOC')
type = 'market' if isMarketOrder else 'limit'
side = 'buy' if Precise.string_gt(amount, '0') else 'sell'
rawStatus = self.safe_string(order, 'finish_as', 'open')
average = self.safe_number(order, 'fill_price')
else:
rawStatus = self.safe_string(order, 'status')
timestamp = self.safe_integer(order, 'create_time_ms')
if timestamp is None:
timestamp = self.safe_timestamp_2(order, 'create_time', 'ctime')
lastTradeTimestamp = self.safe_integer(order, 'update_time_ms')
if lastTradeTimestamp is None:
lastTradeTimestamp = self.safe_timestamp_2(order, 'update_time', 'finish_time')
exchangeSymbol = self.safe_string_2(order, 'currency_pair', 'market', contract)
fees = []
gtFee = self.safe_string(order, 'gt_fee')
if gtFee:
fees.append({
'currency': 'GT',
'cost': gtFee,
})
fee = self.safe_string(order, 'fee')
if fee:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'fee_currency')),
'cost': fee,
})
rebate = self.safe_string(order, 'rebated_fee')
if rebate:
fees.append({
'currency': self.safe_currency_code(self.safe_string(order, 'rebated_fee_currency')),
'cost': Precise.string_neg(rebate),
})
numFeeCurrencies = len(fees)
multipleFeeCurrencies = numFeeCurrencies > 1
status = self.parse_order_status(rawStatus)
return self.safe_order({
'id': self.safe_string(order, 'id'),
'clientOrderId': self.safe_string(order, 'text'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'status': status,
'symbol': self.safe_symbol(exchangeSymbol),
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': self.parse_number(price),
'stopPrice': self.safe_number(trigger, 'price'),
'average': average,
'amount': self.parse_number(Precise.string_abs(amount)),
'cost': Precise.string_abs(cost),
'filled': self.parse_number(Precise.string_abs(filled)),
'remaining': self.parse_number(Precise.string_abs(remaining)),
'fee': None if multipleFeeCurrencies else self.safe_value(fees, 0),
'fees': fees if multipleFeeCurrencies else [],
'trades': None,
'info': order,
}, market)
async def create_reduce_only_order(self, symbol, type, side, amount, price=None, params={}):
request = {
'reduceOnly': True,
}
return await self.create_order(symbol, type, side, amount, price, self.extend(request, params))
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
stop = self.safe_value_2(params, 'is_stop_order', 'stop', False)
params = self.omit(params, ['is_stop_order', 'stop'])
clientOrderId = self.safe_string_2(params, 'text', 'clientOrderId')
orderId = id
if clientOrderId is not None:
params = self.omit(params, ['text', 'clientOrderId'])
if clientOrderId[0] != 't':
clientOrderId = 't-' + clientOrderId
orderId = clientOrderId
market = None if (symbol is None) else self.market(symbol)
type, query = self.handle_market_type_and_params('fetchOrder', market, params)
contract = (type == 'swap') or (type == 'future')
request, requestParams = self.prepare_request(market, type, query) if contract else self.spot_order_prepare_request(market, stop, query)
request['order_id'] = orderId
methodMiddle = 'PriceOrders' if stop else 'Orders'
method = self.get_supported_mapping(type, {
'spot': 'privateSpotGet' + methodMiddle + 'OrderId',
'margin': 'privateSpotGet' + methodMiddle + 'OrderId',
'swap': 'privateFuturesGetSettle' + methodMiddle + 'OrderId',
'future': 'privateDeliveryGetSettle' + methodMiddle + 'OrderId',
})
response = await getattr(self, method)(self.extend(request, requestParams))
return self.parse_order(response, market)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_status('open', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_status('finished', symbol, since, limit, params)
async def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None if (symbol is None) else self.market(symbol)
stop = self.safe_value(params, 'stop')
params = self.omit(params, 'stop')
type, query = self.handle_market_type_and_params('fetchOrdersByStatus', market, params)
spot = (type == 'spot') or (type == 'margin')
request, requestParams = self.multi_order_spot_prepare_request(market, stop, query) if spot else self.prepare_request(market, type, query)
if status == 'closed':
status = 'finished'
request['status'] = status
if limit is not None:
request['limit'] = limit
if since is not None and spot:
request['from'] = int(since / 1000)
methodTail = 'PriceOrders' if stop else 'Orders'
openSpotOrders = spot and (status == 'open') and not stop
if openSpotOrders:
methodTail = 'OpenOrders'
method = self.get_supported_mapping(type, {
'spot': 'privateSpotGet' + methodTail,
'margin': 'privateSpotGet' + methodTail,
'swap': 'privateFuturesGetSettle' + methodTail,
'future': 'privateDeliveryGetSettle' + methodTail,
})
response = await getattr(self, method)(self.extend(request, requestParams))
result = response
if openSpotOrders:
result = []
for i in range(0, len(response)):
orders = self.safe_value(response[i], 'orders')
result = self.array_concat(result, orders)
orders = self.parse_orders(result, market, since, limit)
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
market = None if (symbol is None) else self.market(symbol)
stop = self.safe_value_2(params, 'is_stop_order', 'stop', False)
params = self.omit(params, ['is_stop_order', 'stop'])
type, query = self.handle_market_type_and_params('cancelOrder', market, params)
request, requestParams = self.spot_order_prepare_request(market, stop, query) if (type == 'spot' or type == 'margin') else self.prepare_request(market, type, query)
request['order_id'] = id
pathMiddle = 'Price' if stop else ''
method = self.get_supported_mapping(type, {
'spot': 'privateSpotDelete' + pathMiddle + 'OrdersOrderId',
'margin': 'privateSpotDelete' + pathMiddle + 'OrdersOrderId',
'swap': 'privateFuturesDeleteSettle' + pathMiddle + 'OrdersOrderId',
'future': 'privateDeliveryDeleteSettle' + pathMiddle + 'OrdersOrderId',
})
response = await getattr(self, method)(self.extend(request, requestParams))
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
market = None if (symbol is None) else self.market(symbol)
stop = self.safe_value(params, 'stop')
params = self.omit(params, 'stop')
type, query = self.handle_market_type_and_params('cancelAllOrders', market, params)
request, requestParams = self.multi_order_spot_prepare_request(market, stop, query) if (type == 'spot') else self.prepare_request(market, type, query)
methodTail = 'PriceOrders' if stop else 'Orders'
method = self.get_supported_mapping(type, {
'spot': 'privateSpotDelete' + methodTail,
'margin': 'privateSpotDelete' + methodTail,
'swap': 'privateFuturesDeleteSettle' + methodTail,
'future': 'privateDeliveryDeleteSettle' + methodTail,
})
response = await getattr(self, method)(self.extend(request, requestParams))
return self.parse_orders(response, market)
async def transfer(self, code, amount, fromAccount, toAccount, params={}):
await self.load_markets()
currency = self.currency(code)
fromId = self.parse_account(fromAccount)
toId = self.parse_account(toAccount)
truncated = self.currency_to_precision(code, amount)
request = {
'currency': currency['id'],
'amount': truncated,
}
if not (fromId in self.options['accountsByType']):
request['from'] = 'margin'
request['currency_pair'] = fromId
else:
request['from'] = fromId
if not (toId in self.options['accountsByType']):
request['to'] = 'margin'
request['currency_pair'] = toId
else:
request['to'] = toId
if fromId == 'margin' or toId == 'margin':
symbol = self.safe_string_2(params, 'symbol', 'currency_pair')
if symbol is None:
raise ArgumentsRequired(self.id + ' transfer requires params["symbol"] for isolated margin transfers')
market = self.market(symbol)
request['currency_pair'] = market['id']
params = self.omit(params, 'symbol')
if (toId == 'futures') or (toId == 'delivery') or (fromId == 'futures') or (fromId == 'delivery'):
request['settle'] = currency['lowerCaseId']
response = await self.privateWalletPostTransfers(self.extend(request, params))
transfer = self.parse_transfer(response, currency)
return self.extend(transfer, {
'fromAccount': fromAccount,
'toAccount': toAccount,
'amount': self.parse_number(truncated),
})
def parse_account(self, account):
accountsByType = self.options['accountsByType']
if account in accountsByType:
return accountsByType[account]
elif account in self.markets:
market = self.market(account)
return market['id']
else:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' accounts must be one of ' + ', '.join(keys) + ' or an isolated margin symbol')
def parse_transfer(self, transfer, currency=None):
timestamp = self.milliseconds()
return {
'id': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': self.safe_currency_code(None, currency),
'amount': None,
'fromAccount': None,
'toAccount': None,
'status': None,
'info': transfer,
}
async def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
if (leverage < 0) or (leverage > 100):
            raise BadRequest(self.id + ' setLeverage() leverage should be between 0 and 100')
await self.load_markets()
market = self.market(symbol)
method = self.get_supported_mapping(market['type'], {
'swap': 'privateFuturesPostSettlePositionsContractLeverage',
'future': 'privateDeliveryPostSettlePositionsContractLeverage',
})
request, query = self.prepare_request(market, None, params)
defaultMarginMode = self.safe_string_2(self.options, 'marginMode', 'defaultMarginMode')
crossLeverageLimit = self.safe_string(query, 'cross_leverage_limit')
marginMode = self.safe_string(query, 'marginMode', defaultMarginMode)
if crossLeverageLimit is not None:
marginMode = 'cross'
leverage = crossLeverageLimit
if marginMode == 'cross' or marginMode == 'cross_margin':
request['query'] = {
'cross_leverage_limit': str(leverage),
'leverage': '0',
}
else:
request['query'] = {
'leverage': str(leverage),
}
response = await getattr(self, method)(self.extend(request, query))
return response
def parse_position(self, position, market=None):
contract = self.safe_string(position, 'contract')
market = self.safe_market(contract, market)
size = self.safe_string(position, 'size')
side = None
if Precise.string_gt(size, '0'):
side = 'long'
elif Precise.string_lt(size, '0'):
side = 'short'
maintenanceRate = self.safe_string(position, 'maintenance_rate')
notional = self.safe_string(position, 'value')
leverage = self.safe_string(position, 'leverage')
marginMode = None
if leverage == '0':
marginMode = 'cross'
else:
marginMode = 'isolated'
unrealisedPnl = self.safe_string(position, 'unrealised_pnl')
takerFee = '0.00075'
feePaid = Precise.string_mul(takerFee, notional)
initialMarginString = Precise.string_add(Precise.string_div(notional, leverage), feePaid)
percentage = Precise.string_mul(Precise.string_div(unrealisedPnl, initialMarginString), '100')
return {
'info': position,
'symbol': self.safe_string(market, 'symbol'),
'timestamp': None,
'datetime': None,
'initialMargin': self.parse_number(initialMarginString),
'initialMarginPercentage': self.parse_number(Precise.string_div(initialMarginString, notional)),
'maintenanceMargin': self.parse_number(Precise.string_mul(maintenanceRate, notional)),
'maintenanceMarginPercentage': self.parse_number(maintenanceRate),
'entryPrice': self.safe_number(position, 'entry_price'),
'notional': self.parse_number(notional),
'leverage': self.safe_number(position, 'leverage'),
'unrealizedPnl': self.parse_number(unrealisedPnl),
'contracts': self.parse_number(Precise.string_abs(size)),
'contractSize': self.safe_value(market, 'contractSize'),
'marginRatio': None,
'liquidationPrice': self.safe_number(position, 'liq_price'),
'markPrice': self.safe_number(position, 'mark_price'),
'collateral': self.safe_number(position, 'margin'),
'marginMode': marginMode,
            'marginType': marginMode,
            'side': side,
'percentage': self.parse_number(percentage),
}
def parse_positions(self, positions):
result = []
for i in range(0, len(positions)):
result.append(self.parse_position(positions[i]))
return result
async def fetch_positions(self, symbols=None, params={}):
await self.load_markets()
type, query = self.handle_market_type_and_params('fetchPositions', None, params)
request, requestParams = self.prepare_request(None, type, query)
method = self.get_supported_mapping(type, {
'swap': 'privateFuturesGetSettlePositions',
'future': 'privateDeliveryGetSettlePositions',
})
response = await getattr(self, method)(self.extend(request, requestParams))
result = self.parse_positions(response)
return self.filter_by_array(result, 'symbol', symbols, False)
async def fetch_leverage_tiers(self, symbols=None, params={}):
await self.load_markets()
type, query = self.handle_market_type_and_params('fetchLeverageTiers', None, params)
request, requestParams = self.prepare_request(None, type, query)
if type != 'future' and type != 'swap':
raise BadRequest(self.id + ' fetchLeverageTiers only supports swap and future')
method = self.get_supported_mapping(type, {
'swap': 'publicFuturesGetSettleContracts',
'future': 'publicDeliveryGetSettleContracts',
})
response = await getattr(self, method)(self.extend(request, requestParams))
return self.parse_leverage_tiers(response, symbols, 'name')
def parse_market_leverage_tiers(self, info, market=None):
        maintenanceMarginUnit = self.safe_string(info, 'maintenance_rate')
        leverageMax = self.safe_string(info, 'leverage_max')
        riskLimitStep = self.safe_string(info, 'risk_limit_step')
        riskLimitMax = self.safe_string(info, 'risk_limit_max')
        initialMarginUnit = Precise.string_div('1', leverageMax)
maintenanceMarginRate = maintenanceMarginUnit
initialMarginRatio = initialMarginUnit
floor = '0'
tiers = []
        while Precise.string_lt(floor, riskLimitMax):
cap = Precise.string_add(floor, riskLimitStep)
tiers.append({
'tier': self.parse_number(Precise.string_div(cap, riskLimitStep)),
'currency': self.safe_string(market, 'settle'),
'minNotional': self.parse_number(floor),
'maxNotional': self.parse_number(cap),
'maintenanceMarginRate': self.parse_number(maintenanceMarginRate),
'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRatio)),
'info': info,
})
maintenanceMarginRate = Precise.string_add(maintenanceMarginRate, maintenanceMarginUnit)
initialMarginRatio = Precise.string_add(initialMarginRatio, initialMarginUnit)
floor = cap
return tiers
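    # Worked example for the loop above, with assumed contract values: given
    # leverage_max = '100', maintenance_rate = '0.005', risk_limit_step =
    # '1000000' and risk_limit_max = '2000000', two tiers are produced:
    # tier 1 spans notionals [0, 1000000] with maintenanceMarginRate 0.005
    # and maxLeverage 100 (1 / 0.01), tier 2 spans (1000000, 2000000] with
    # 0.01 and 50, because each pass adds one margin unit before raising the
    # floor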
def sign(self, path, api=[], method='GET', params={}, headers=None, body=None):
        authentication = api[0]
        type = api[1]
        query = self.omit(params, self.extract_params(path))
path = self.implode_params(path, params)
endPart = '' if (path == '') else ('/' + path)
entirePath = '/' + type + endPart
url = self.urls['api'][authentication][type]
if url is None:
raise NotSupported(self.id + ' does not have a testnet for the ' + type + ' market type.')
url += entirePath
if authentication == 'public':
if query:
url += '?' + self.urlencode(query)
else:
queryString = ''
if (method == 'GET') or (method == 'DELETE'):
if query:
queryString = self.urlencode(query)
url += '?' + queryString
else:
urlQueryParams = self.safe_value(query, 'query', {})
if urlQueryParams:
queryString = self.urlencode(urlQueryParams)
url += '?' + queryString
query = self.omit(query, 'query')
body = self.json(query)
bodyPayload = '' if (body is None) else body
bodySignature = self.hash(self.encode(bodyPayload), 'sha512')
timestamp = self.seconds()
timestampString = str(timestamp)
signaturePath = '/api/' + self.version + entirePath
payloadArray = [method.upper(), signaturePath, queryString, bodySignature, timestampString]
payload = "\n".join(payloadArray)
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512)
headers = {
'KEY': self.apiKey,
'Timestamp': timestampString,
'SIGN': signature,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
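    # A minimal sketch of the signed payload built above, using assumed
    # example values (the path and query are illustrative, not a real call):
    #
    #     bodySignature = sha512('')                     # hash of the raw body
    #     payload = '\n'.join(['GET', '/api/v4/spot/accounts',
    #                          'currency=USDT', bodySignature, '1645465199'])
    #     signature = hmac_sha512(secret, payload)       # sent as the SIGN header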
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
label = self.safe_string(response, 'label')
if label is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], label, feedback)
raise ExchangeError(feedback)
| true
| true
|
f7015b2afba0129d69363926d5d8ce7086ff8751
| 715
|
py
|
Python
|
asyncworker/metrics/registry.py
|
async-worker/async-worker
|
9025d8f14d3fe6e1a2b1373c84abf41de575b359
|
[
"MIT"
] | 7
|
2021-05-02T19:26:14.000Z
|
2022-02-08T15:12:10.000Z
|
asyncworker/metrics/registry.py
|
async-worker/async-worker
|
9025d8f14d3fe6e1a2b1373c84abf41de575b359
|
[
"MIT"
] | 10
|
2021-05-02T15:37:55.000Z
|
2021-09-11T10:58:32.000Z
|
asyncworker/metrics/registry.py
|
async-worker/async-worker
|
9025d8f14d3fe6e1a2b1373c84abf41de575b359
|
[
"MIT"
] | null | null | null |
from prometheus_client import CollectorRegistry
from asyncworker.conf import settings
from asyncworker.metrics.collectors.gc import GCCollector
from asyncworker.metrics.collectors.platform import PlatformCollector
from asyncworker.metrics.collectors.process import ProcessCollector
NAMESPACE = (
f"{settings.METRICS_NAMESPACE}_{settings.METRICS_APPPREFIX}"
if settings.METRICS_APPPREFIX
else f"{settings.METRICS_NAMESPACE}"
)
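# Illustrative values (not defaults from asyncworker.conf): with
# METRICS_NAMESPACE = "asyncworker" and METRICS_APPPREFIX = "myapp" the
# namespace becomes "asyncworker_myapp"; with no METRICS_APPPREFIX it stays
# "asyncworker".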
REGISTRY = CollectorRegistry(auto_describe=True)
PLATFORM_COLLECTOR = PlatformCollector(registry=REGISTRY, namespace=NAMESPACE)
PROCESS_COLLECTOR = ProcessCollector(namespace=NAMESPACE, registry=REGISTRY)
GC_COLLECTOR = GCCollector(registry=REGISTRY, namespace=NAMESPACE)
| 35.75
| 78
| 0.846154
|
from prometheus_client import CollectorRegistry
from asyncworker.conf import settings
from asyncworker.metrics.collectors.gc import GCCollector
from asyncworker.metrics.collectors.platform import PlatformCollector
from asyncworker.metrics.collectors.process import ProcessCollector
NAMESPACE = (
f"{settings.METRICS_NAMESPACE}_{settings.METRICS_APPPREFIX}"
if settings.METRICS_APPPREFIX
else f"{settings.METRICS_NAMESPACE}"
)
REGISTRY = CollectorRegistry(auto_describe=True)
PLATFORM_COLLECTOR = PlatformCollector(registry=REGISTRY, namespace=NAMESPACE)
PROCESS_COLLECTOR = ProcessCollector(namespace=NAMESPACE, registry=REGISTRY)
GC_COLLECTOR = GCCollector(registry=REGISTRY, namespace=NAMESPACE)
| true
| true
|
f7015b5c72fd870e0de08b32abce1f336bb097ab
| 2,324
|
py
|
Python
|
src/graphql/validation/rules/unique_operation_types.py
|
hspedro/graphql-core
|
2b27e641d51789f532f989d3e125e04b33d24564
|
[
"MIT"
] | null | null | null |
src/graphql/validation/rules/unique_operation_types.py
|
hspedro/graphql-core
|
2b27e641d51789f532f989d3e125e04b33d24564
|
[
"MIT"
] | null | null | null |
src/graphql/validation/rules/unique_operation_types.py
|
hspedro/graphql-core
|
2b27e641d51789f532f989d3e125e04b33d24564
|
[
"MIT"
] | null | null | null |
from typing import Dict, Optional, Union
from ...error import GraphQLError
from ...language import (
OperationTypeDefinitionNode,
OperationType,
SchemaDefinitionNode,
SchemaExtensionNode,
)
from ...type import GraphQLObjectType
from . import SDLValidationContext, SDLValidationRule
__all__ = ["UniqueOperationTypesRule"]
class UniqueOperationTypesRule(SDLValidationRule):
"""Unique operation types
A GraphQL document is only valid if it has only one type per operation.
"""
def __init__(self, context: SDLValidationContext):
super().__init__(context)
schema = context.schema
self.defined_operation_types: Dict[
OperationType, OperationTypeDefinitionNode
] = {}
self.existing_operation_types: Dict[
OperationType, Optional[GraphQLObjectType]
] = (
{
OperationType.QUERY: schema.query_type,
OperationType.MUTATION: schema.mutation_type,
OperationType.SUBSCRIPTION: schema.subscription_type,
}
if schema
else {}
)
self.schema = schema
def check_operation_types(
self, node: Union[SchemaDefinitionNode, SchemaExtensionNode], *_args
):
for operation_type in node.operation_types or []:
operation = operation_type.operation
already_defined_operation_type = self.defined_operation_types.get(operation)
if self.existing_operation_types.get(operation):
self.report_error(
GraphQLError(
f"Type for {operation.value} already defined in the schema."
" It cannot be redefined.",
operation_type,
)
)
elif already_defined_operation_type:
self.report_error(
GraphQLError(
f"There can be only one {operation.value} type in schema.",
[already_defined_operation_type, operation_type],
)
)
else:
self.defined_operation_types[operation] = operation_type
return self.SKIP
enter_schema_definition = enter_schema_extension = check_operation_types
| 34.176471
| 88
| 0.609725
|
from typing import Dict, Optional, Union
from ...error import GraphQLError
from ...language import (
OperationTypeDefinitionNode,
OperationType,
SchemaDefinitionNode,
SchemaExtensionNode,
)
from ...type import GraphQLObjectType
from . import SDLValidationContext, SDLValidationRule
__all__ = ["UniqueOperationTypesRule"]
class UniqueOperationTypesRule(SDLValidationRule):
def __init__(self, context: SDLValidationContext):
super().__init__(context)
schema = context.schema
self.defined_operation_types: Dict[
OperationType, OperationTypeDefinitionNode
] = {}
self.existing_operation_types: Dict[
OperationType, Optional[GraphQLObjectType]
] = (
{
OperationType.QUERY: schema.query_type,
OperationType.MUTATION: schema.mutation_type,
OperationType.SUBSCRIPTION: schema.subscription_type,
}
if schema
else {}
)
self.schema = schema
def check_operation_types(
self, node: Union[SchemaDefinitionNode, SchemaExtensionNode], *_args
):
for operation_type in node.operation_types or []:
operation = operation_type.operation
already_defined_operation_type = self.defined_operation_types.get(operation)
if self.existing_operation_types.get(operation):
self.report_error(
GraphQLError(
f"Type for {operation.value} already defined in the schema."
" It cannot be redefined.",
operation_type,
)
)
elif already_defined_operation_type:
self.report_error(
GraphQLError(
f"There can be only one {operation.value} type in schema.",
[already_defined_operation_type, operation_type],
)
)
else:
self.defined_operation_types[operation] = operation_type
return self.SKIP
enter_schema_definition = enter_schema_extension = check_operation_types
| true
| true
|
f7015b8ac1c781333bd97a91af3d7a190b0a0877
| 754
|
py
|
Python
|
sdk/python/pulumi_azure_native/costmanagement/v20200301preview/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_native/costmanagement/v20200301preview/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_native/costmanagement/v20200301preview/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'CostAllocationPolicyType',
'CostAllocationResourceType',
'RuleStatus',
]
class CostAllocationPolicyType(str, Enum):
"""
Method of cost allocation for the rule
"""
FIXED_PROPORTION = "FixedProportion"
class CostAllocationResourceType(str, Enum):
"""
Type of resources contained in this cost allocation rule
"""
DIMENSION = "Dimension"
TAG = "Tag"
class RuleStatus(str, Enum):
"""
Status of the rule
"""
NOT_ACTIVE = "NotActive"
ACTIVE = "Active"
PROCESSING = "Processing"
| 20.944444
| 80
| 0.659151
|
from enum import Enum
__all__ = [
'CostAllocationPolicyType',
'CostAllocationResourceType',
'RuleStatus',
]
class CostAllocationPolicyType(str, Enum):
FIXED_PROPORTION = "FixedProportion"
class CostAllocationResourceType(str, Enum):
DIMENSION = "Dimension"
TAG = "Tag"
class RuleStatus(str, Enum):
NOT_ACTIVE = "NotActive"
ACTIVE = "Active"
PROCESSING = "Processing"
| true
| true
|
f7015bc8d1aae686fe1efb7e3d9e149f9f981a3e
| 12,289
|
py
|
Python
|
tools/train_eval.py
|
Sakura176/PointRCNN
|
a7fbb25e931609a39c32cb821a7c98a326e8b0c0
|
[
"MIT"
] | null | null | null |
tools/train_eval.py
|
Sakura176/PointRCNN
|
a7fbb25e931609a39c32cb821a7c98a326e8b0c0
|
[
"MIT"
] | null | null | null |
tools/train_eval.py
|
Sakura176/PointRCNN
|
a7fbb25e931609a39c32cb821a7c98a326e8b0c0
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import torch
import torch.nn.functional as F
from lib.utils.bbox_transform import decode_bbox_target
from tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate
from lib.config import cfg
import lib.utils.kitti_utils as kitti_utils
import lib.utils.iou3d.iou3d_utils as iou3d_utils
from datetime import datetime
from tensorboardX import SummaryWriter
import tqdm
np.random.seed(1024) # set the same seed
def save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):
corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)
img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)
img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
box_valid_mask = np.logical_and(
img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)
kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)
with open(kitti_output_file, 'w') as f:
for k in range(bbox3d.shape[0]):
if box_valid_mask[k] == 0:
continue
x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
beta = np.arctan2(z, x)
alpha = -np.sign(beta) * np.pi / 2 + beta + ry
            print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
                  (cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],
                   bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5],
                   bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
                   bbox3d[k, 6], scores[k]), file=f)
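# A small numeric sketch of the observation-angle conversion above (box
# values assumed): for a box at x = 10, z = 10 with ry = 0, beta is
# np.arctan2(10, 10) = pi / 4, so alpha = -pi / 2 + pi / 4 + 0 = -pi / 4,
# i.e. the KITTI alpha is the heading ry corrected by the viewing direction.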
def eval_one_epoch_joint(model, dataloader, epoch_id, result_dir):
np.random.seed(666)
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
mode = 'EVAL'
final_output_dir = os.path.join(result_dir, 'final_result', 'data')
os.makedirs(final_output_dir, exist_ok=True)
if True:
roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')
os.makedirs(rpn_output_dir, exist_ok=True)
os.makedirs(roi_output_dir, exist_ok=True)
os.makedirs(refine_output_dir, exist_ok=True)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
total_roi_recalled_bbox_list = [0] * 5
dataset = dataloader.dataset
cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
for data in dataloader:
cnt += 1
calib = data['calib']
sample_id, pts_rect, pts_features, pts_input = \
data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
batch_size = len(sample_id)
inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
input_data = {'pts_input': inputs, 'calib': calib}
# model inference
ret_dict = model(input_data)
        print(ret_dict.keys())
roi_scores_raw = ret_dict['roi_scores_raw'] # (B, M)
roi_boxes3d = ret_dict['rois'] # (B, M, 7)
seg_result = ret_dict['seg_result'].long() # (B, N)
rcnn_cls = ret_dict['rcnn_cls'].view(
batch_size, -1, ret_dict['rcnn_cls'].shape[1])
rcnn_reg = ret_dict['rcnn_reg'].view(
batch_size, -1, ret_dict['rcnn_reg'].shape[1]) # (B, M, C)
# bounding box regression
anchor_size = MEAN_SIZE
if cfg.RCNN.SIZE_RES_ON_ROI:
assert False
pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),
anchor_size=anchor_size,
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True).view(batch_size, -1, 7)
# scoring
if rcnn_cls.shape[2] == 1:
raw_scores = rcnn_cls # (B, M, 1)
norm_scores = torch.sigmoid(raw_scores)
pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
else:
pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
cls_norm_scores = F.softmax(rcnn_cls, dim=1)
raw_scores = rcnn_cls[:, pred_classes]
norm_scores = cls_norm_scores[:, pred_classes]
# evaluation
recalled_num = gt_num = rpn_iou = 0
if not False:
if not cfg.RPN.FIXED:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
rpn_cls_label = torch.from_numpy(
rpn_cls_label).cuda(non_blocking=True).long()
gt_boxes3d = data['gt_boxes3d']
for k in range(batch_size):
# calculate recall
cur_gt_boxes3d = gt_boxes3d[k]
                tmp_idx = len(cur_gt_boxes3d) - 1
while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:
tmp_idx -= 1
if tmp_idx >= 0:
cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]
cur_gt_boxes3d = torch.from_numpy(
cur_gt_boxes3d).cuda(non_blocking=True).float()
iou3d = iou3d_utils.boxes_iou3d_gpu(
pred_boxes3d[k], cur_gt_boxes3d)
gt_max_iou, _ = iou3d.max(dim=0)
refined_iou, _ = iou3d.max(dim=1)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (
gt_max_iou > thresh).sum().item()
recalled_num += (gt_max_iou > 0.7).sum().item()
gt_num += cur_gt_boxes3d.shape[0]
total_gt_bbox += cur_gt_boxes3d.shape[0]
# original recall
iou3d_in = iou3d_utils.boxes_iou3d_gpu(
roi_boxes3d[k], cur_gt_boxes3d)
gt_max_iou_in, _ = iou3d_in.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_roi_recalled_bbox_list[idx] += (
gt_max_iou_in > thresh).sum().item()
if not cfg.RPN.FIXED:
fg_mask = rpn_cls_label > 0
correct = ((seg_result == rpn_cls_label)
& fg_mask).sum().float()
union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct
rpn_iou = correct / torch.clamp(union, min=1.0)
total_rpn_iou += rpn_iou.item()
disp_dict = {
'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if True:
# save roi and refine results
roi_boxes3d_np = roi_boxes3d.cpu().numpy()
pred_boxes3d_np = pred_boxes3d.cpu().numpy()
roi_scores_raw_np = roi_scores_raw.cpu().numpy()
raw_scores_np = raw_scores.cpu().numpy()
rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()
rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()
seg_result_np = seg_result.cpu().numpy()
output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),
seg_result_np.reshape(batch_size, -1, 1)), axis=2)
for k in range(batch_size):
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,
roi_scores_raw_np[k], image_shape)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,
raw_scores_np[k], image_shape)
output_file = os.path.join(
rpn_output_dir, '%06d.npy' % cur_sample_id)
np.save(output_file, output_data.astype(np.float32))
# scores thresh
inds = norm_scores > cfg.RCNN.SCORE_THRESH
for k in range(batch_size):
cur_inds = inds[k].view(-1)
if cur_inds.sum() == 0:
continue
pred_boxes3d_selected = pred_boxes3d[k, cur_inds]
raw_scores_selected = raw_scores[k, cur_inds]
norm_scores_selected = norm_scores[k, cur_inds]
# NMS thresh
# rotated nms
boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(
pred_boxes3d_selected)
keep_idx = iou3d_utils.nms_gpu(
boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)
pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
scores_selected = raw_scores_selected[keep_idx]
pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu(
).numpy(), scores_selected.cpu().numpy()
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
final_total += pred_boxes3d_selected.shape[0]
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected,
final_output_dir, scores_selected, image_shape)
progress_bar.close()
# dump empty files
split_file = os.path.join(dataset.imageset_dir,
'..', '..', 'ImageSets', dataset.split + '.txt')
split_file = os.path.abspath(split_file)
image_idx_list = [x.strip() for x in open(split_file).readlines()]
empty_cnt = 0
    for k in range(len(image_idx_list)):
cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
if not os.path.exists(cur_file):
with open(cur_file, 'w') as temp_f:
pass
empty_cnt += 1
ret_dict = {'empty_cnt': empty_cnt}
avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))
avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
avg_det_num = (final_total / max(len(dataset), 1.0))
ret_dict['rpn_iou'] = avg_rpn_iou
ret_dict['rcnn_cls_acc'] = avg_cls_acc
ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
ret_dict['rcnn_avg_num'] = avg_det_num
for idx, thresh in enumerate(thresh_list):
cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(
total_gt_bbox, 1.0)
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
if cfg.TEST.SPLIT != 'test':
name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
current_class=name_to_class[cfg.CLASSES])
ret_dict.update(ap_dict)
return ap_result_str
| 43.733096
| 115
| 0.582391
|
import os
import numpy as np
import torch
import torch.nn.functional as F
from lib.utils.bbox_transform import decode_bbox_target
from tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate
from lib.config import cfg
import lib.utils.kitti_utils as kitti_utils
import lib.utils.iou3d.iou3d_utils as iou3d_utils
from datetime import datetime
from tensorboardX import SummaryWriter
import tqdm
np.random.seed(1024)
def save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):
corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)
img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)
img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
box_valid_mask = np.logical_and(
img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)
kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)
with open(kitti_output_file, 'w') as f:
for k in range(bbox3d.shape[0]):
if box_valid_mask[k] == 0:
continue
x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
beta = np.arctan2(z, x)
alpha = -np.sign(beta) * np.pi / 2 + beta + ry
            print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
                  (cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],
                   bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5],
                   bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
                   bbox3d[k, 6], scores[k]), file=f)
def eval_one_epoch_joint(model, dataloader, epoch_id, result_dir):
np.random.seed(666)
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
mode = 'EVAL'
final_output_dir = os.path.join(result_dir, 'final_result', 'data')
os.makedirs(final_output_dir, exist_ok=True)
if True:
roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')
os.makedirs(rpn_output_dir, exist_ok=True)
os.makedirs(roi_output_dir, exist_ok=True)
os.makedirs(refine_output_dir, exist_ok=True)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
total_roi_recalled_bbox_list = [0] * 5
dataset = dataloader.dataset
cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
for data in dataloader:
cnt += 1
calib = data['calib']
sample_id, pts_rect, pts_features, pts_input = \
data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
batch_size = len(sample_id)
inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
input_data = {'pts_input': inputs, 'calib': calib}
ret_dict = model(input_data)
        print(ret_dict.keys())
        roi_scores_raw = ret_dict['roi_scores_raw']
        roi_boxes3d = ret_dict['rois']
        seg_result = ret_dict['seg_result'].long()
rcnn_cls = ret_dict['rcnn_cls'].view(
batch_size, -1, ret_dict['rcnn_cls'].shape[1])
rcnn_reg = ret_dict['rcnn_reg'].view(
batch_size, -1, ret_dict['rcnn_reg'].shape[1])
anchor_size = MEAN_SIZE
if cfg.RCNN.SIZE_RES_ON_ROI:
assert False
pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),
anchor_size=anchor_size,
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True).view(batch_size, -1, 7)
if rcnn_cls.shape[2] == 1:
raw_scores = rcnn_cls
norm_scores = torch.sigmoid(raw_scores)
pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
else:
pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
cls_norm_scores = F.softmax(rcnn_cls, dim=1)
raw_scores = rcnn_cls[:, pred_classes]
norm_scores = cls_norm_scores[:, pred_classes]
recalled_num = gt_num = rpn_iou = 0
if not False:
if not cfg.RPN.FIXED:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
rpn_cls_label = torch.from_numpy(
rpn_cls_label).cuda(non_blocking=True).long()
gt_boxes3d = data['gt_boxes3d']
for k in range(batch_size):
cur_gt_boxes3d = gt_boxes3d[k]
                tmp_idx = len(cur_gt_boxes3d) - 1
while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:
tmp_idx -= 1
if tmp_idx >= 0:
cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]
cur_gt_boxes3d = torch.from_numpy(
cur_gt_boxes3d).cuda(non_blocking=True).float()
iou3d = iou3d_utils.boxes_iou3d_gpu(
pred_boxes3d[k], cur_gt_boxes3d)
gt_max_iou, _ = iou3d.max(dim=0)
refined_iou, _ = iou3d.max(dim=1)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (
gt_max_iou > thresh).sum().item()
recalled_num += (gt_max_iou > 0.7).sum().item()
gt_num += cur_gt_boxes3d.shape[0]
total_gt_bbox += cur_gt_boxes3d.shape[0]
iou3d_in = iou3d_utils.boxes_iou3d_gpu(
roi_boxes3d[k], cur_gt_boxes3d)
gt_max_iou_in, _ = iou3d_in.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_roi_recalled_bbox_list[idx] += (
gt_max_iou_in > thresh).sum().item()
if not cfg.RPN.FIXED:
fg_mask = rpn_cls_label > 0
correct = ((seg_result == rpn_cls_label)
& fg_mask).sum().float()
union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct
rpn_iou = correct / torch.clamp(union, min=1.0)
total_rpn_iou += rpn_iou.item()
disp_dict = {
'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if True:
roi_boxes3d_np = roi_boxes3d.cpu().numpy()
pred_boxes3d_np = pred_boxes3d.cpu().numpy()
roi_scores_raw_np = roi_scores_raw.cpu().numpy()
raw_scores_np = raw_scores.cpu().numpy()
rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()
rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()
seg_result_np = seg_result.cpu().numpy()
output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),
seg_result_np.reshape(batch_size, -1, 1)), axis=2)
for k in range(batch_size):
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,
roi_scores_raw_np[k], image_shape)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,
raw_scores_np[k], image_shape)
output_file = os.path.join(
rpn_output_dir, '%06d.npy' % cur_sample_id)
np.save(output_file, output_data.astype(np.float32))
inds = norm_scores > cfg.RCNN.SCORE_THRESH
for k in range(batch_size):
cur_inds = inds[k].view(-1)
if cur_inds.sum() == 0:
continue
pred_boxes3d_selected = pred_boxes3d[k, cur_inds]
raw_scores_selected = raw_scores[k, cur_inds]
norm_scores_selected = norm_scores[k, cur_inds]
boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(
pred_boxes3d_selected)
keep_idx = iou3d_utils.nms_gpu(
boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)
pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
scores_selected = raw_scores_selected[keep_idx]
pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu(
).numpy(), scores_selected.cpu().numpy()
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
final_total += pred_boxes3d_selected.shape[0]
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected,
final_output_dir, scores_selected, image_shape)
progress_bar.close()
split_file = os.path.join(dataset.imageset_dir,
'..', '..', 'ImageSets', dataset.split + '.txt')
split_file = os.path.abspath(split_file)
image_idx_list = [x.strip() for x in open(split_file).readlines()]
empty_cnt = 0
    for k in range(len(image_idx_list)):
cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
if not os.path.exists(cur_file):
with open(cur_file, 'w') as temp_f:
pass
empty_cnt += 1
ret_dict = {'empty_cnt': empty_cnt}
avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))
avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
avg_det_num = (final_total / max(len(dataset), 1.0))
ret_dict['rpn_iou'] = avg_rpn_iou
ret_dict['rcnn_cls_acc'] = avg_cls_acc
ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
ret_dict['rcnn_avg_num'] = avg_det_num
for idx, thresh in enumerate(thresh_list):
cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(
total_gt_bbox, 1.0)
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
if cfg.TEST.SPLIT != 'test':
name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
current_class=name_to_class[cfg.CLASSES])
ret_dict.update(ap_dict)
return ap_result_str
| true
| true
|
f7015cc43d31242d7bc3480085b9a95866ebf963
| 7,532
|
py
|
Python
|
parallel_esn/bo.py
|
zblanks/parallel_esn
|
25a979d0863ce54a4a588f4216dc473d4e9c5e8a
|
[
"BSD-2-Clause"
] | 7
|
2019-05-06T00:32:24.000Z
|
2021-06-03T14:49:23.000Z
|
parallel_esn/bo.py
|
zblanks/parallel_esn
|
25a979d0863ce54a4a588f4216dc473d4e9c5e8a
|
[
"BSD-2-Clause"
] | 8
|
2019-04-20T04:51:38.000Z
|
2020-02-25T22:25:34.000Z
|
parallel_esn/bo.py
|
zblanks/parallel_esn
|
25a979d0863ce54a4a588f4216dc473d4e9c5e8a
|
[
"BSD-2-Clause"
] | 2
|
2019-04-19T11:05:51.000Z
|
2020-10-15T20:40:26.000Z
|
from math import log10
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
import numpy as np
from .utils import create_rng
class BO:
"""
Bayesian Optimization framework
"""
def __init__(self, k, hidden_dim=(100, 10000),
spectral_radius=(.9, 1.3), p=(0, 1),
alpha=(0, 1), beta=(1e-5, 1e3), random_state=None):
"""
Parameters
----------
k : tuple
Range of values for nearest neighbors in small-world network
hidden_dim : tuple, optional
Range values for the number of nodes in the reservoir
spectral_radius : tuple, optional
Range of values for the spectral radius for the reservoir
p : tuple, optional
Range of values to consider for the rewire probability
alpha : tuple, optional
Range of values for the leaking rate
beta : tuple, optional
Range of values for the L2 regression regularization
random_state : int or np.random.RandomState, optional
Random state initializer
"""
# Check that all the hyper-parameters are tuples with two entries
# which define the lower and upper bounds for the search space
hyper_params = [k, hidden_dim, spectral_radius, p, alpha, beta]
for param in hyper_params:
assert isinstance(param, tuple), "{} must be a tuple".format(param)
            assert len(param) == 2, "{} must have two arguments; the upper " \
                                    "and lower bound".format(param)
self.lwr_k = k[0]
self.upr_k = k[1]
self.lwr_hidden_dim = hidden_dim[0]
self.upr_hidden_dim = hidden_dim[1]
self.lwr_spectral_radius = spectral_radius[0]
self.upr_spectral_radius = spectral_radius[1]
self.lwr_p = p[0]
self.upr_p = p[1]
self.lwr_alpha = alpha[0]
self.upr_alpha = alpha[1]
self.lwr_beta = beta[0]
self.upr_beta = beta[1]
self.rng = create_rng(random_state)
self.gpr = GaussianProcessRegressor(kernel=Matern(),
random_state=self.rng)
# We need a placeholder for different hyper-parameter values that
# arrive and the corresponding error values
self.H = []
self.y = []
def update_gpr(self, X, y):
"""
Updates the Gaussian process with new data and error value
Updates the Gaussian process by adding, `H`, the list of
hyper-parameter values that were used with true function and y
is the resulting error from the model
Parameters
----------
X : list
Hyper-parameter values that were tried
y : float
Error that resulted from using X on the true function
Returns
-------
None
"""
self.H.append(X)
self.y.append(y)
self.gpr.fit(self.H, self.y)
def _sample_uniformly(self, num_samples, lwr_bound, upr_bound):
"""
        Samples uniformly on a logarithmic scale between the bounds
Parameters
----------
num_samples : int
Number of samples to generate
lwr_bound : float
Hyper-parameter lower bound
upr_bound : float
Hyper-parameter upper bound
Returns
-------
param_vals : np.ndarray
Uniformly sampled hyper-parameter values
"""
        # To sample uniformly on a log scale, take the base-ten logarithm of
        # the upper and lower bounds, sample uniformly within that range, and
        # map the samples back with 10 ** x
new_lwr_bound = log10(lwr_bound)
new_upr_bound = log10(upr_bound)
samples = self.rng.uniform(low=new_lwr_bound, high=new_upr_bound,
size=(num_samples, 1))
param_vals = np.power(10, samples)
return param_vals
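    # Illustrative sketch (bounds assumed from the beta default): with
    # lwr_bound = 1e-5 and upr_bound = 1e3, exponents are drawn uniformly
    # from [-5, 3] and 10 ** exponent is returned, so each decade (e.g.
    # [1e-4, 1e-3] and [1e2, 1e3]) receives equal probability mass, unlike a
    # plain uniform draw over [1e-5, 1e3]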
def _build_options(self, num_samples=1000):
"""
Builds matrix which defines possible options for this iteration
Parameters
----------
num_samples : int, optional
Number of hyper-parameter samples to generate
Returns
-------
H_space : np.ndarray
Matrix of options for the ESN hyper-parameters
"""
k_vals = self.rng.randint(low=self.lwr_k, high=self.upr_k,
size=(num_samples, 1), dtype=np.int32)
hidden_dim_vals = self.rng.randint(low=self.lwr_hidden_dim,
high=self.upr_hidden_dim,
size=(num_samples, 1),
dtype=np.int32)
spectral_radius_vals = self.rng.uniform(low=self.lwr_spectral_radius,
high=self.upr_spectral_radius,
size=(num_samples, 1))
p_vals = self.rng.uniform(low=self.lwr_p, high=self.upr_p,
size=(num_samples, 1))
alpha_vals = self.rng.uniform(low=self.lwr_alpha, high=self.upr_alpha,
size=(num_samples, 1))
beta_vals = self._sample_uniformly(num_samples, self.lwr_beta,
self.upr_beta)
H_space = np.concatenate([k_vals, hidden_dim_vals,
spectral_radius_vals, p_vals, alpha_vals,
beta_vals], axis=1)
return H_space
def find_best_choices(self, num_samples=1000, num_choices=1):
"""
Finds the best hyper-parameter combination
Parameters
----------
num_samples : int, optional
Number of hyper-parameter samples to generate
num_choices : int, optional
Number of choices to select
Returns
-------
param_vals : dict
Best hyper-parameter values for the current Gaussian process
"""
H_space = self._build_options(num_samples)
        # On the first MPI iteration there is no prior, so randomly
        # sample num_choices points
if num_choices > 1:
idx = self.rng.choice(np.arange(num_samples), size=num_choices,
replace=False)
best_vals = H_space[idx, :]
else:
y_pred = self.gpr.sample_y(H_space, random_state=self.rng)
choices = np.argmin(y_pred)
best_vals = H_space[choices, :]
hyper_parameters = ['k', 'hidden_dim', 'spectral_radius', 'p', 'alpha',
'beta']
param_vals = {}
for (i, val) in enumerate(hyper_parameters):
if num_choices == 1:
param_vals[val] = best_vals[i]
if (val == 'k') or (val == 'hidden_dim'):
param_vals[val] = int(param_vals[val])
else:
param_vals[val] = best_vals[:, i]
if (val == 'k') or (val == 'hidden_dim'):
param_vals[val] = param_vals[val].astype(int)
return param_vals
def return_best_parameters(self):
min_error = min(self.y)
index = self.y.index(min_error)
print("Minimum Validation Error = ", min_error)
print("Best parameters found = ", self.H[index])
return min_error, self.H[index]
| 35.196262
| 79
| 0.556957
|
from math import log10
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
import numpy as np
from .utils import create_rng
class BO:
def __init__(self, k, hidden_dim=(100, 10000),
spectral_radius=(.9, 1.3), p=(0, 1),
alpha=(0, 1), beta=(1e-5, 1e3), random_state=None):
hyper_params = [k, hidden_dim, spectral_radius, p, alpha, beta]
for param in hyper_params:
assert isinstance(param, tuple), "{} must be a tuple".format(param)
            assert len(param) == 2, "{} must have two arguments; the upper " \
                                    "and lower bound".format(param)
self.lwr_k = k[0]
self.upr_k = k[1]
self.lwr_hidden_dim = hidden_dim[0]
self.upr_hidden_dim = hidden_dim[1]
self.lwr_spectral_radius = spectral_radius[0]
self.upr_spectral_radius = spectral_radius[1]
self.lwr_p = p[0]
self.upr_p = p[1]
self.lwr_alpha = alpha[0]
self.upr_alpha = alpha[1]
self.lwr_beta = beta[0]
self.upr_beta = beta[1]
self.rng = create_rng(random_state)
self.gpr = GaussianProcessRegressor(kernel=Matern(),
random_state=self.rng)
self.H = []
self.y = []
def update_gpr(self, X, y):
self.H.append(X)
self.y.append(y)
self.gpr.fit(self.H, self.y)
def _sample_uniformly(self, num_samples, lwr_bound, upr_bound):
new_lwr_bound = log10(lwr_bound)
new_upr_bound = log10(upr_bound)
samples = self.rng.uniform(low=new_lwr_bound, high=new_upr_bound,
size=(num_samples, 1))
param_vals = np.power(10, samples)
return param_vals
def _build_options(self, num_samples=1000):
k_vals = self.rng.randint(low=self.lwr_k, high=self.upr_k,
size=(num_samples, 1), dtype=np.int32)
hidden_dim_vals = self.rng.randint(low=self.lwr_hidden_dim,
high=self.upr_hidden_dim,
size=(num_samples, 1),
dtype=np.int32)
spectral_radius_vals = self.rng.uniform(low=self.lwr_spectral_radius,
high=self.upr_spectral_radius,
size=(num_samples, 1))
p_vals = self.rng.uniform(low=self.lwr_p, high=self.upr_p,
size=(num_samples, 1))
alpha_vals = self.rng.uniform(low=self.lwr_alpha, high=self.upr_alpha,
size=(num_samples, 1))
beta_vals = self._sample_uniformly(num_samples, self.lwr_beta,
self.upr_beta)
H_space = np.concatenate([k_vals, hidden_dim_vals,
spectral_radius_vals, p_vals, alpha_vals,
beta_vals], axis=1)
return H_space
def find_best_choices(self, num_samples=1000, num_choices=1):
H_space = self._build_options(num_samples)
if num_choices > 1:
idx = self.rng.choice(np.arange(num_samples), size=num_choices,
replace=False)
best_vals = H_space[idx, :]
else:
y_pred = self.gpr.sample_y(H_space, random_state=self.rng)
choices = np.argmin(y_pred)
best_vals = H_space[choices, :]
hyper_parameters = ['k', 'hidden_dim', 'spectral_radius', 'p', 'alpha',
'beta']
param_vals = {}
for (i, val) in enumerate(hyper_parameters):
if num_choices == 1:
param_vals[val] = best_vals[i]
if (val == 'k') or (val == 'hidden_dim'):
param_vals[val] = int(param_vals[val])
else:
param_vals[val] = best_vals[:, i]
if (val == 'k') or (val == 'hidden_dim'):
param_vals[val] = param_vals[val].astype(int)
return param_vals
def return_best_parameters(self):
min_error = min(self.y)
index = self.y.index(min_error)
print("Minimum Validation Error = ", min_error)
print("Best parameters found = ", self.H[index])
return min_error, self.H[index]
| true
| true
|
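A minimal, self-contained sketch of the log-uniform sampling trick that _sample_uniformly relies on, assuming only numpy (the beta-style bounds below are illustrative):

import numpy as np

# Sample uniformly in log10-space so each decade between the bounds is
# equally likely, then map back to the original scale with 10**x.
rng = np.random.default_rng(0)
lwr_bound, upr_bound = 1e-5, 1e3  # illustrative bounds
samples = rng.uniform(low=np.log10(lwr_bound), high=np.log10(upr_bound), size=5)
param_vals = np.power(10.0, samples)
print(param_vals)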
f7015d2b3830806be972d5bfd60bcbe4ef3a2efc
| 1,924
|
py
|
Python
|
model/encoder/model_export_test.py
|
hjonnala/deeplab2
|
1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e
|
[
"Apache-2.0"
] | null | null | null |
model/encoder/model_export_test.py
|
hjonnala/deeplab2
|
1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e
|
[
"Apache-2.0"
] | null | null | null |
model/encoder/model_export_test.py
|
hjonnala/deeplab2
|
1868757c4333ec5287cc0bf0a6bbf38fbbe34c2e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of model exports for axial_resnet_instances."""
import os
from absl import flags
from absl.testing import parameterized
import tensorflow as tf
from deeplab2.model.encoder import axial_resnet_instances
FLAGS = flags.FLAGS
class ModelExportTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('resnet50',),
('resnet50_beta',),
('max_deeplab_s_backbone',),
('max_deeplab_l_backbone',),
('axial_resnet_s',),
('axial_resnet_l',),
('axial_deeplab_s',),
('axial_deeplab_l',),
('swidernet',),
('axial_swidernet',),
)
def test_model_export(self, model_name):
model = axial_resnet_instances.get_model(
model_name,
output_stride=16,
backbone_layer_multiplier=1.0,
bn_layer=tf.keras.layers.BatchNormalization,
conv_kernel_weight_decay=0.0001,
# Test with small models only.
num_blocks=[2, 2, 2, 2],
# Disable drop path as it is not compatible with model exporting.
block_group_config={'drop_path_keep_prob': 1.0})
model(tf.keras.Input([257, 257, 3], batch_size=1), training=False)
export_dir = os.path.join(
FLAGS.test_tmpdir, 'test_model_export', model_name)
model.save(export_dir)
if __name__ == '__main__':
tf.test.main()
| 31.032258
| 74
| 0.701663
|
import os
from absl import flags
from absl.testing import parameterized
import tensorflow as tf
from deeplab2.model.encoder import axial_resnet_instances
FLAGS = flags.FLAGS
class ModelExportTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('resnet50',),
('resnet50_beta',),
('max_deeplab_s_backbone',),
('max_deeplab_l_backbone',),
('axial_resnet_s',),
('axial_resnet_l',),
('axial_deeplab_s',),
('axial_deeplab_l',),
('swidernet',),
('axial_swidernet',),
)
def test_model_export(self, model_name):
model = axial_resnet_instances.get_model(
model_name,
output_stride=16,
backbone_layer_multiplier=1.0,
bn_layer=tf.keras.layers.BatchNormalization,
conv_kernel_weight_decay=0.0001,
num_blocks=[2, 2, 2, 2],
block_group_config={'drop_path_keep_prob': 1.0})
model(tf.keras.Input([257, 257, 3], batch_size=1), training=False)
export_dir = os.path.join(
FLAGS.test_tmpdir, 'test_model_export', model_name)
model.save(export_dir)
if __name__ == '__main__':
tf.test.main()
| true
| true
|
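A hedged sketch of the save/load round-trip the test above exercises, using a toy Keras model instead of the deeplab2 backbones and assuming TF 2.x semantics where model.save(path) writes a SavedModel directory:

import os
import tempfile
import tensorflow as tf

# Build a trivial functional model, export it, and reload it.
inputs = tf.keras.Input([3], batch_size=1)
outputs = tf.keras.layers.Dense(4)(inputs)
model = tf.keras.Model(inputs, outputs)
export_dir = os.path.join(tempfile.mkdtemp(), "toy_export")  # illustrative path
model.save(export_dir)
restored = tf.keras.models.load_model(export_dir)
print(restored(tf.zeros([1, 3])).shape)  # (1, 4)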
f7015e1fbd223d4d58916cb28639860f971d3ab0
| 519
|
py
|
Python
|
114_sdoc7/run.py
|
sunlightlabs/senate_disbursements
|
3e5989d075a91271c581e32c69f7ee9beb7eebad
|
[
"BSD-2-Clause"
] | 2
|
2017-06-12T18:09:10.000Z
|
2021-04-18T17:11:06.000Z
|
114_sdoc7/run.py
|
dwillis/senate_disbursements
|
3e5989d075a91271c581e32c69f7ee9beb7eebad
|
[
"BSD-2-Clause"
] | 1
|
2017-10-03T14:18:00.000Z
|
2017-10-03T14:50:35.000Z
|
114_sdoc7/run.py
|
dwillis/senate_disbursements
|
3e5989d075a91271c581e32c69f7ee9beb7eebad
|
[
"BSD-2-Clause"
] | 5
|
2016-09-20T23:42:34.000Z
|
2017-10-05T19:23:13.000Z
|
from rip_pages import rip_pages
from read_pages import read_pages
from format_csv import format_csv
# STEP 1: CONFIG VARIABLES
SOURCE_DOC = '114sdoc7'
FILE_NAME = "GPO-CDOC-" + SOURCE_DOC + ".pdf"
OUT_FILE = 'senate_data.csv'
MISSING_FILE = 'missing_data.json'
START_PAGE = 17
END_PAGE = 2259
# STEP 2: Rip text, read pages, format output
rip_pages(FILE_NAME, START_PAGE, END_PAGE)
read_pages(START_PAGE, END_PAGE, OUT_FILE, MISSING_FILE)
format_csv(SOURCE_DOC, OUT_FILE)
# STEP 3: Reconcile data in MISSING_FILE
| 23.590909
| 56
| 0.782274
|
from rip_pages import rip_pages
from read_pages import read_pages
from format_csv import format_csv
SOURCE_DOC = '114sdoc7'
FILE_NAME = "GPO-CDOC-" + SOURCE_DOC + ".pdf"
OUT_FILE = 'senate_data.csv'
MISSING_FILE = 'missing_data.json'
START_PAGE = 17
END_PAGE = 2259
rip_pages(FILE_NAME, START_PAGE, END_PAGE)
read_pages(START_PAGE, END_PAGE, OUT_FILE, MISSING_FILE)
format_csv(SOURCE_DOC, OUT_FILE)
| true
| true
|
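Assuming the project-local rip_pages/read_pages/format_csv helpers keep the signatures used in run.py, the same three-step pipeline can be wrapped in a reusable function; run_pipeline is a hypothetical name, not part of the original script:

from rip_pages import rip_pages
from read_pages import read_pages
from format_csv import format_csv

def run_pipeline(source_doc, start_page, end_page,
                 out_file='senate_data.csv', missing_file='missing_data.json'):
    # Derive the PDF name the same way run.py does, then run all three steps.
    file_name = 'GPO-CDOC-' + source_doc + '.pdf'
    rip_pages(file_name, start_page, end_page)
    read_pages(start_page, end_page, out_file, missing_file)
    format_csv(source_doc, out_file)

run_pipeline('114sdoc7', 17, 2259)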
f7015e22936fea6cb9c989e8330ff91377b8db69
| 557
|
py
|
Python
|
Fundamentals/Dictionaries/Lect_Demo..py
|
LuGeorgiev/PythonSelfLearning
|
db8fcff2c2df8946d6acf2a2e5677eccf2bbe5dc
|
[
"MIT"
] | null | null | null |
Fundamentals/Dictionaries/Lect_Demo..py
|
LuGeorgiev/PythonSelfLearning
|
db8fcff2c2df8946d6acf2a2e5677eccf2bbe5dc
|
[
"MIT"
] | null | null | null |
Fundamentals/Dictionaries/Lect_Demo..py
|
LuGeorgiev/PythonSelfLearning
|
db8fcff2c2df8946d6acf2a2e5677eccf2bbe5dc
|
[
"MIT"
] | null | null | null |
my_list = [1, 2, 2, 4, 6]
# print the list reversed
print(my_list[::-1])
student = {'user': 'Lubo',
'pass': 'admin',
'course': ['C# Fundamentals', 'C# ASP', 'Algorithms']}
for key in student:
print(key)
for kvp in student.items():
print(f'the key is: {kvp[0]}, and values are: {kvp[1]} ')
print(student['pass'])
print(student.get('Pass', 'Sorry mate no such key'))
if 'pass' in student.keys():
print('Here')
else:
print('Not here')
second_part_student = {
'age': 25
}
student.update(second_part_student)
print(student)
| 19.206897
| 65
| 0.603232
|
my_list = [1, 2, 2, 4, 6]
print(my_list[::-1])
student = {'user': 'Lubo',
'pass': 'admin',
'course': ['C# Fundamentals', 'C# ASP', 'Algorithms']}
for key in student:
print(key)
for kvp in student.items():
print(f'the key is: {kvp[0]}, and values are: {kvp[1]} ')
print(student['pass'])
print(student.get('Pass', 'Sorry mate no such key'))
if 'pass' in student.keys():
print('Here')
else:
print('Not here')
second_part_student = {
'age': 25
}
student.update(second_part_student)
print(student)
| true
| true
|
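The kvp[0]/kvp[1] indexing in the items() loop above works, but tuple unpacking is the more idiomatic way to walk a dict; a minimal equivalent:

student = {'user': 'Lubo', 'pass': 'admin'}
for key, value in student.items():
    print(f'the key is: {key}, and values are: {value}')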
f7015eff97146dcc90e0d346cf7013c167c3b070
| 748
|
py
|
Python
|
datas/exameUltrassom.py
|
mdietterle/aulas
|
b289a7252c2c8f7dfb4ee5482326a94e7d87ee45
|
[
"Apache-2.0"
] | null | null | null |
datas/exameUltrassom.py
|
mdietterle/aulas
|
b289a7252c2c8f7dfb4ee5482326a94e7d87ee45
|
[
"Apache-2.0"
] | null | null | null |
datas/exameUltrassom.py
|
mdietterle/aulas
|
b289a7252c2c8f7dfb4ee5482326a94e7d87ee45
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from dateutil.relativedelta import relativedelta
print("Programa para calcular o prazo de exame de ultrassom...\nO mesmo deve ser feito entre 22 e 24 semanas de gestação")
print("você deverá informar com quantas semanasa de gestação a paciente se encontra, no formato aaaa/mm/dd")
semanas = int(input("Com quantas semanas de gestação a paciente se encontra hoje? "))
exameInicio = 22-semanas
exameFinal = 24 - semanas
morfologicoInicio = datetime.date.today()+ relativedelta(weeks=exameInicio)
morfologicoFinal = datetime.date.today() + relativedelta(weeks=exameFinal)
dfinal = morfologicoFinal.strftime('%d/%m/%Y')
dinicial = morfologicoInicio.strftime('%d/%m/%Y')
print("O exame deverá ser feito entre ",dinicial, " e ", dfinal)
| 53.428571
| 122
| 0.778075
|
import datetime
from dateutil.relativedelta import relativedelta
print("Programa para calcular o prazo de exame de ultrassom...\nO mesmo deve ser feito entre 22 e 24 semanas de gestação")
print("você deverá informar com quantas semanasa de gestação a paciente se encontra, no formato aaaa/mm/dd")
semanas = int(input("Com quantas semanas de gestação a paciente se encontra hoje? "))
exameInicio = 22-semanas
exameFinal = 24 - semanas
morfologicoInicio = datetime.date.today()+ relativedelta(weeks=exameInicio)
morfologicoFinal = datetime.date.today() + relativedelta(weeks=exameFinal)
dfinal = morfologicoFinal.strftime('%d/%m/%Y')
dinicial = morfologicoInicio.strftime('%d/%m/%Y')
print("O exame deverá ser feito entre ",dinicial, " e ", dfinal)
| true
| true
|
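A minimal sketch of the date arithmetic above: relativedelta shifts today's date by the weeks remaining to each end of the 22-24 week window (the gestation value below is illustrative):

import datetime
from dateutil.relativedelta import relativedelta

semanas = 18  # illustrative current gestation in weeks
inicio = datetime.date.today() + relativedelta(weeks=22 - semanas)
final = datetime.date.today() + relativedelta(weeks=24 - semanas)
print(inicio.strftime('%d/%m/%Y'), '-', final.strftime('%d/%m/%Y'))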
f701614adbe0c289b89493a7ae3140602778980b
| 2,333
|
py
|
Python
|
jp.atcoder/abc012/abc012_4/21865313.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc012/abc012_4/21865313.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc012/abc012_4/21865313.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Generator, NoReturn
class StdReader:
def __init__(
self,
) -> NoReturn:
import sys
self.buf = sys.stdin.buffer
self.lines = self.async_readlines()
self.chunks: Generator
def async_readlines(
self,
) -> Generator:
while True:
gen = self.line_chunks()
yield gen
def line_chunks(
self,
) -> Generator:
ln = self.buf.readline()
for chunk in ln.split():
yield chunk
def __call__(
self,
) -> bytes:
try:
chunk = next(self.chunks)
except:
self.chunks = next(
self.lines,
)
chunk = self()
return chunk
def str(
self,
) -> str:
b = self()
return b.decode()
def int(
self,
) -> int:
return int(self.str())
from abc import ABC, abstractmethod
class Solver(ABC):
def __init__(self):
self.reader = StdReader()
def __call__(
self,
):
self.prepare()
self.solve()
@abstractmethod
def prepare(self):
...
@abstractmethod
def solve(self):
...
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import floyd_warshall
class Problem(
Solver,
):
def prepare(self):
reader = self.reader
n = reader.int()
m = reader.int()
a = [reader.int() for _ in range(3 * m)]
a = np.array(
a,
).reshape(m, 3)
a, b, t = a.T
self.n, self.m = n, m
self.a = a - 1
self.b = b - 1
self.t = t
def solve(self):
self.compute_dist_mat()
dist = self.dist
d = dist.max(axis=1).min()
print(int(d))
def compute_dist_mat(
self,
):
n = self.n
a = self.a
b = self.b
t = self.t
g = csr_matrix(
(t, (a, b)),
shape=(n, n),
)
dist = floyd_warshall(
csgraph=g,
directed=False,
)
self.dist = dist
def main():
t = 1
# t = StdReader().int()
for _ in range(t):
Problem()()
if __name__ == "__main__":
main()
| 17.80916
| 48
| 0.479211
|
from __future__ import annotations
from typing import Generator, NoReturn
class StdReader:
def __init__(
self,
) -> NoReturn:
import sys
self.buf = sys.stdin.buffer
self.lines = self.async_readlines()
self.chunks: Generator
def async_readlines(
self,
) -> Generator:
while True:
gen = self.line_chunks()
yield gen
def line_chunks(
self,
) -> Generator:
ln = self.buf.readline()
for chunk in ln.split():
yield chunk
def __call__(
self,
) -> bytes:
try:
chunk = next(self.chunks)
except:
self.chunks = next(
self.lines,
)
chunk = self()
return chunk
def str(
self,
) -> str:
b = self()
return b.decode()
def int(
self,
) -> int:
return int(self.str())
from abc import ABC, abstractmethod
class Solver(ABC):
def __init__(self):
self.reader = StdReader()
def __call__(
self,
):
self.prepare()
self.solve()
@abstractmethod
def prepare(self):
...
@abstractmethod
def solve(self):
...
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import floyd_warshall
class Problem(
Solver,
):
def prepare(self):
reader = self.reader
n = reader.int()
m = reader.int()
a = [reader.int() for _ in range(3 * m)]
a = np.array(
a,
).reshape(m, 3)
a, b, t = a.T
self.n, self.m = n, m
self.a = a - 1
self.b = b - 1
self.t = t
def solve(self):
self.compute_dist_mat()
dist = self.dist
d = dist.max(axis=1).min()
print(int(d))
def compute_dist_mat(
self,
):
n = self.n
a = self.a
b = self.b
t = self.t
g = csr_matrix(
(t, (a, b)),
shape=(n, n),
)
dist = floyd_warshall(
csgraph=g,
directed=False,
)
self.dist = dist
def main():
t = 1
for _ in range(t):
Problem()()
if __name__ == "__main__":
main()
| true
| true
|
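A small self-contained check of the graph step in compute_dist_mat: floyd_warshall on a 3-node path graph, followed by the same max-then-min reduction used in solve():

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import floyd_warshall

# Undirected path graph: edge 0-1 with weight 2, edge 1-2 with weight 3.
a = np.array([0, 1])
b = np.array([1, 2])
t = np.array([2.0, 3.0])
g = csr_matrix((t, (a, b)), shape=(3, 3))
dist = floyd_warshall(csgraph=g, directed=False)
print(int(dist.max(axis=1).min()))  # 3: node 1 minimizes the farthest distance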
f70162a1e21c7e1848d4431d52ef94aa63177e6a
| 3,991
|
py
|
Python
|
tests/test_reducers_utils.py
|
jonathansick/astropy-librarian
|
f6a7cbd42ce0323b6d62f842a67eeeb7c31160b2
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_reducers_utils.py
|
jonathansick/astropy-librarian
|
f6a7cbd42ce0323b6d62f842a67eeeb7c31160b2
|
[
"BSD-3-Clause"
] | 14
|
2020-03-30T17:25:17.000Z
|
2022-02-10T15:15:55.000Z
|
tests/test_reducers_utils.py
|
jonathansick/astropy-librarian
|
f6a7cbd42ce0323b6d62f842a67eeeb7c31160b2
|
[
"BSD-3-Clause"
] | 1
|
2021-05-01T21:32:09.000Z
|
2021-05-01T21:32:09.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for the astropylibrarian.reducers.utils module.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from astropylibrarian.reducers.utils import iter_sphinx_sections
if TYPE_CHECKING:
from .conftest import HtmlTestData
def test_iter_sphinx_sections(color_excess_tutorial: HtmlTestData) -> None:
"""Test the iter_sphinx_sections algorithm using the color-excess.html
notebook tutorial example.
This example is made complicated by the fact that the heading levels are
not strictly hierarchical. There are multiple "h1" tags.
"""
doc = color_excess_tutorial.parse()
root = doc.cssselect(".card .section")[0]
sections = []
for s in iter_sphinx_sections(
root_section=root,
base_url=color_excess_tutorial.url,
headers=[],
header_callback=lambda x: x.rstrip("¶"),
content_callback=lambda x: x.strip(),
):
sections.append(s)
assert len(sections) == 5
assert sections[0].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Learning Goals",
]
assert sections[0].header_level == 2
assert sections[0].url == (
"http://learn.astropy.org/rst-tutorials/color-excess.html"
"#learning-goals"
)
assert sections[0].content.startswith(
"Investigate extinction curve shapes"
)
assert sections[1].headings[-1] == "Keywords"
assert sections[1].header_level == 2
assert sections[1].content.startswith(
"dust extinction, synphot, astroquery, units, photometry, extinction,"
)
assert sections[2].headings[-1] == "Companion Content"
assert sections[2].header_level == 2
assert sections[2].content.startswith("Bessell & Murphy")
assert sections[3].headings[-1] == "Summary"
assert sections[3].header_level == 2
assert sections[3].content.startswith(
"In this tutorial, we will look at some extinction curves from the"
)
assert sections[4].headings[-1] == (
"Analyzing interstellar reddening and calculating synthetic "
"photometry"
)
assert sections[4].header_level == 1
    # Demonstrate finding additional h1 sections on a page (that are supposed
# to be additional h2 sections in a hierarchical sense).
h1_heading = sections[-1].headings[-1]
for sibling in root.itersiblings(tag="div"):
if "section" in sibling.classes:
for s in iter_sphinx_sections(
root_section=sibling,
base_url=color_excess_tutorial.url,
headers=[h1_heading],
header_callback=lambda x: x.rstrip("¶"),
content_callback=lambda x: x.strip(),
):
sections.append(s)
assert sections[5].header_level == 2
assert sections[5].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Introduction",
]
assert sections[6].header_level == 2
assert sections[6].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Example 1: Investigate Extinction Models",
]
assert sections[7].header_level == 2
assert sections[7].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Example 2: Deredden a Spectrum",
]
assert sections[8].header_level == 3
assert sections[8].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Example 3: Calculate Color Excess with synphot",
"Exercise",
]
assert sections[9].header_level == 2
assert sections[9].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Example 3: Calculate Color Excess with synphot",
]
| 32.713115
| 78
| 0.656477
|
from __future__ import annotations
from typing import TYPE_CHECKING
from astropylibrarian.reducers.utils import iter_sphinx_sections
if TYPE_CHECKING:
from .conftest import HtmlTestData
def test_iter_sphinx_sections(color_excess_tutorial: HtmlTestData) -> None:
doc = color_excess_tutorial.parse()
root = doc.cssselect(".card .section")[0]
sections = []
for s in iter_sphinx_sections(
root_section=root,
base_url=color_excess_tutorial.url,
headers=[],
header_callback=lambda x: x.rstrip("¶"),
content_callback=lambda x: x.strip(),
):
sections.append(s)
assert len(sections) == 5
assert sections[0].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Learning Goals",
]
assert sections[0].header_level == 2
assert sections[0].url == (
"http://learn.astropy.org/rst-tutorials/color-excess.html"
"#learning-goals"
)
assert sections[0].content.startswith(
"Investigate extinction curve shapes"
)
assert sections[1].headings[-1] == "Keywords"
assert sections[1].header_level == 2
assert sections[1].content.startswith(
"dust extinction, synphot, astroquery, units, photometry, extinction,"
)
assert sections[2].headings[-1] == "Companion Content"
assert sections[2].header_level == 2
assert sections[2].content.startswith("Bessell & Murphy")
assert sections[3].headings[-1] == "Summary"
assert sections[3].header_level == 2
assert sections[3].content.startswith(
"In this tutorial, we will look at some extinction curves from the"
)
assert sections[4].headings[-1] == (
"Analyzing interstellar reddening and calculating synthetic "
"photometry"
)
assert sections[4].header_level == 1
h1_heading = sections[-1].headings[-1]
for sibling in root.itersiblings(tag="div"):
if "section" in sibling.classes:
for s in iter_sphinx_sections(
root_section=sibling,
base_url=color_excess_tutorial.url,
headers=[h1_heading],
header_callback=lambda x: x.rstrip("¶"),
content_callback=lambda x: x.strip(),
):
sections.append(s)
assert sections[5].header_level == 2
assert sections[5].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Introduction",
]
assert sections[6].header_level == 2
assert sections[6].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Example 1: Investigate Extinction Models",
]
assert sections[7].header_level == 2
assert sections[7].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Example 2: Deredden a Spectrum",
]
assert sections[8].header_level == 3
assert sections[8].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Example 3: Calculate Color Excess with synphot",
"Exercise",
]
assert sections[9].header_level == 2
assert sections[9].headings == [
"Analyzing interstellar reddening and calculating synthetic "
"photometry",
"Example 3: Calculate Color Excess with synphot",
]
| true
| true
|
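A hedged sketch of the HTML handling the test depends on, assuming lxml with the cssselect package installed (the markup is a toy stand-in for color-excess.html):

import lxml.html

html = "<div class='section'><h2>Learning Goals¶</h2><p> Investigate extinction curve shapes </p></div>"
doc = lxml.html.fromstring(html)
section = doc.cssselect('div.section')[0]
heading = section.cssselect('h2')[0].text_content().rstrip('¶')
content = section.cssselect('p')[0].text_content().strip()
print(heading, '->', content)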
f701631c5afbc2b367db071f5ff121b16ced9907
| 4,821
|
py
|
Python
|
tests/pbbs/maximalIndependentSet/run_omprace_small.py
|
rutgers-apl/omp-racer
|
a8a32e186950997b8eee7864f766819129a5ee06
|
[
"BSD-2-Clause"
] | 2
|
2020-09-17T15:18:49.000Z
|
2021-03-06T10:21:23.000Z
|
tests/pbbs/maximalIndependentSet/run_omprace_small.py
|
rutgers-apl/omp-racer
|
a8a32e186950997b8eee7864f766819129a5ee06
|
[
"BSD-2-Clause"
] | 1
|
2020-09-08T18:36:24.000Z
|
2020-09-17T15:18:27.000Z
|
tests/pbbs/maximalIndependentSet/run_omprace_small.py
|
rutgers-apl/omp-racer
|
a8a32e186950997b8eee7864f766819129a5ee06
|
[
"BSD-2-Clause"
] | 1
|
2021-01-19T15:28:15.000Z
|
2021-01-19T15:28:15.000Z
|
#!/usr/bin/python
import sys, string, os, popen2, shutil, platform, subprocess, pprint, time
import util, commands, csv
from math import sqrt
#clean up the src
do_clean = True
#build the src
do_build = True
#clean, build, and run the benchmarks
do_run = True
#collect data to plot
#do_collect_data = True
if do_clean and not do_build:
print "Clean - true and build - false not allowed"
exit(0)
configs = []
entry = { "NAME" : "RUN_ALL_BENCHMARKS",
"NUM_RUNS" : 1,
"CLEAN_LINE" : " make clean ",
"BUILD_LINE" : " make ",
"BUILD_ARCH" : "x86_64",
"RUN_ARCH" : "x86_64",
"RUN_LINE" : '/usr/bin/time -f "%E" ./',
#"RUN_LINE" : 'time ./',
"ARGS" : "",
}
configs.append(entry)
ref_cwd = os.getcwd()
arch = platform.machine()
full_hostname = platform.node()
hostname=full_hostname
bench_name="MIS"
benchmarks=[
"ndMIS"
]
inner_data_folder=[
"graphData/data"
]
input_file=[
"randLocalGraph_J_5_2500000"
]
executable=[
"MIS.openmp.dynamic",
"MIS.omprn",
"MIS.ompp.dynamic",
]
inputs=[
"-r 1 -o /tmp/ofile470293_748866 ../graphData/data/randLocalGraph_J_5_2500000"
]
if __name__ == "__main__":
with open('omprace.csv', 'wb') as csvfile:
res_writer = csv.writer(csvfile, delimiter=',')
res_writer.writerow(['test name', 'baseline openmp(s)', 'omprace no_inst(s)', 'omprace(s)', 'overhead ospg', 'overhead datarace', 'num violations'])
for config in configs:
util.log_heading(config["NAME"], character="-")
row = []
row.append(bench_name[0])
num_violations = -1
print('input file folder: ' + inner_data_folder[0])
data_input = inner_data_folder[0]+'/'+input_file[0]
print('checking if input data exists at:' + data_input)
if not os.path.exists(data_input):
print("input data doesn't exist. building input data")
util.chdir(ref_cwd + "/" + inner_data_folder[0])
build_data = config["BUILD_LINE"] + " " + input_file[0]
util.run_command(build_data, verbose=True)
util.chdir(ref_cwd)
else:
print("input data exists")
for b_index in range(len(executable)):
util.chdir(ref_cwd)
for i in range(0, config["NUM_RUNS"]):
try:
util.chdir(ref_cwd + "/" + benchmarks[0] )
util.log_heading(benchmarks[0], character="=")
try:
clean_string = config["CLEAN_LINE"]
util.run_command(clean_string, verbose=True)
except:
print "Clean failed"
build_bench_string = config["BUILD_LINE"]
util.run_command(build_bench_string, verbose=True)
util.log_heading("running: " + benchmarks[0], character="=")
run_string = config["RUN_LINE"] + executable[b_index] + " " + inputs[0]
#running applications
if b_index == 0:#warm up openmp run
util.run_command(run_string, verbose=True)
output_string = util.run_command(run_string, verbose=True)
output_lines = output_string.split('\n')
if b_index == len(executable)-1:
for output_line in output_lines:
if output_line.startswith("Number of violations ="):
num_violations=int(output_line[output_line.index('=')+1:])
time_line = output_lines[-2] #format is hh:mm:sec
time_line = time_line.split(':')
tot_secs = 0.0
for t in time_line:
tot_secs = (tot_secs*60) + float(t)
row.append(tot_secs)
print ('total secs= ' + str(tot_secs))
except util.ExperimentError, e:
print "Error: %s" % e
print "-----------"
print "%s" % e.output
continue
#finalize row
row.append("{0:.2f}".format(row[2]/row[1]))#ospg ov
row.append("{0:.2f}".format(row[3]/row[1]))#omprace ov
row.append(num_violations)
res_writer.writerow(row)
util.chdir(ref_cwd)
print("done")
| 33.950704
| 156
| 0.500933
|
import sys, string, os, popen2, shutil, platform, subprocess, pprint, time
import util, commands, csv
from math import sqrt
do_clean = True
do_build = True
do_run = True
if do_clean and not do_build:
print "Clean - true and build - false not allowed"
exit(0)
configs = []
entry = { "NAME" : "RUN_ALL_BENCHMARKS",
"NUM_RUNS" : 1,
"CLEAN_LINE" : " make clean ",
"BUILD_LINE" : " make ",
"BUILD_ARCH" : "x86_64",
"RUN_ARCH" : "x86_64",
"RUN_LINE" : '/usr/bin/time -f "%E" ./',
"ARGS" : "",
}
configs.append(entry)
ref_cwd = os.getcwd()
arch = platform.machine()
full_hostname = platform.node()
hostname=full_hostname
bench_name="MIS"
benchmarks=[
"ndMIS"
]
inner_data_folder=[
"graphData/data"
]
input_file=[
"randLocalGraph_J_5_2500000"
]
executable=[
"MIS.openmp.dynamic",
"MIS.omprn",
"MIS.ompp.dynamic",
]
inputs=[
"-r 1 -o /tmp/ofile470293_748866 ../graphData/data/randLocalGraph_J_5_2500000"
]
if __name__ == "__main__":
with open('omprace.csv', 'wb') as csvfile:
res_writer = csv.writer(csvfile, delimiter=',')
res_writer.writerow(['test name', 'baseline openmp(s)', 'omprace no_inst(s)', 'omprace(s)', 'overhead ospg', 'overhead datarace', 'num violations'])
for config in configs:
util.log_heading(config["NAME"], character="-")
row = []
row.append(bench_name[0])
num_violations = -1
print('input file folder: ' + inner_data_folder[0])
data_input = inner_data_folder[0]+'/'+input_file[0]
print('checking if input data exists at:' + data_input)
if not os.path.exists(data_input):
print("input data doesn't exist. building input data")
util.chdir(ref_cwd + "/" + inner_data_folder[0])
build_data = config["BUILD_LINE"] + " " + input_file[0]
util.run_command(build_data, verbose=True)
util.chdir(ref_cwd)
else:
print("input data exists")
for b_index in range(len(executable)):
util.chdir(ref_cwd)
for i in range(0, config["NUM_RUNS"]):
try:
util.chdir(ref_cwd + "/" + benchmarks[0] )
util.log_heading(benchmarks[0], character="=")
try:
clean_string = config["CLEAN_LINE"]
util.run_command(clean_string, verbose=True)
except:
print "Clean failed"
build_bench_string = config["BUILD_LINE"]
util.run_command(build_bench_string, verbose=True)
util.log_heading("running: " + benchmarks[0], character="=")
run_string = config["RUN_LINE"] + executable[b_index] + " " + inputs[0]
#running applications
if b_index == 0:#warm up openmp run
util.run_command(run_string, verbose=True)
output_string = util.run_command(run_string, verbose=True)
output_lines = output_string.split('\n')
if b_index == len(executable)-1:
for output_line in output_lines:
if output_line.startswith("Number of violations ="):
num_violations=int(output_line[output_line.index('=')+1:])
time_line = output_lines[-2] #format is hh:mm:sec
time_line = time_line.split(':')
tot_secs = 0.0
for t in time_line:
tot_secs = (tot_secs*60) + float(t)
row.append(tot_secs)
print ('total secs= ' + str(tot_secs))
except util.ExperimentError, e:
print "Error: %s" % e
print "-----------"
print "%s" % e.output
continue
#finalize row
row.append("{0:.2f}".format(row[2]/row[1]))#ospg ov
row.append("{0:.2f}".format(row[3]/row[1]))#omprace ov
row.append(num_violations)
res_writer.writerow(row)
util.chdir(ref_cwd)
print("done")
| false
| true
|
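The hh:mm:ss folding near the end of the run loop is easy to get wrong, so here is the same conversion as a standalone Python 3 sketch:

def to_seconds(time_line):
    # /usr/bin/time -f "%E" prints e.g. "0:01.23" or "1:02:03.45"; folding
    # left to right multiplies the running total by 60 for each colon field.
    tot_secs = 0.0
    for part in time_line.split(':'):
        tot_secs = tot_secs * 60 + float(part)
    return tot_secs

print(to_seconds('1:02:03.45'))  # 3723.45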
f70163d595617592249268b64bea9e62fee7ad0e
| 48,246
|
py
|
Python
|
jd_beauty_plant.py
|
21945764/Absinthe
|
9bc101d49fcd4e53d64dd6065fad8315543c4f17
|
[
"MIT"
] | 320
|
2022-02-07T11:50:19.000Z
|
2022-03-31T10:07:55.000Z
|
jd_beauty_plant.py
|
21945764/Absinthe
|
9bc101d49fcd4e53d64dd6065fad8315543c4f17
|
[
"MIT"
] | 25
|
2022-02-08T00:40:00.000Z
|
2022-03-31T06:18:08.000Z
|
jd_beauty_plant.py
|
21945764/Absinthe
|
9bc101d49fcd4e53d64dd6065fad8315543c4f17
|
[
"MIT"
] | 274
|
2022-02-07T11:35:01.000Z
|
2022-03-31T15:07:18.000Z
|
#!/bin/env python3
# -*- coding: utf-8 -*-
'''
Thanks to Curtin for providing other scripts for reference
Thanks to the experts aburd and ch for their guidance
Project name: xF_jd_beauty_plant.py
Author: 一风一扬
Function: Health Community - plantation automated tasks
Date: 2022-1-4
cron: 10 9,11,15,21 * * * jd_beauty_plant.py
new Env('化妆馆-种植园自动任务');
Activity entry: 25:/¥2EaeU74Gz07gJ%
Tutorial: this activity shares the regular JD ck, so you only need to specify which account number runs this script.
Set the QingLong variable export plant_cookie="1", meaning the first JD CK account runs this script.
Separate multiple accounts with &, e.g. export plant_cookie="1&2" means the first and second JD CK accounts run it; this way an expired JD ck only needs maintaining once, which is why this update was made.
The QingLong variable export choose_plant_id="true" means you choose the watering ID yourself, for people who planted multiple products; the default is false, which only suits people who planted a single product.
For multiple accounts, if even one account planted multiple products, it must be true for watering to work. If choose_plant_id="false", planted_id can be left unset.
The QingLong variable export planted_id = 'xxxx' is the id to water. For a single account, first set export planted_id = '111111' and export choose_plant_id="true", then run the script once;
the log output will show the planted_id, after which you set export planted_id = 'xxxxxx' to the real value. Multiple accounts work the same way: for 2 accounts export planted_id = '111111&111111',
for 3 accounts export planted_id = '111111&111111&111111', and so on.
Note: planted_id and ck positions must correspond, and you must fill in as many planted_id values as you have accounts; the initial 111111 placeholder is 6 digits.
For example, with export plant_cookie="xxxx&xxxx&xxx", export planted_id = "111111&111111&111111" must also contain 3 ids so that every account can run.
https://github.com/jsNO1/e
'''
######################################################Do not modify the code below######################################
UserAgent = ''
account = ''
cookie = ''
cookies = []
choose_plant_id = 'false'
planted_id = ''
shop_id = ''
beauty_plant_exchange = 'false'
planted_ids = []
import requests
import time, datetime
import requests, re, os, sys, random, json
from urllib.parse import quote, unquote
import threading
import urllib3
# urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings ()
today = datetime.datetime.now ().strftime ('%Y-%m-%d')
tomorrow = (datetime.datetime.now () + datetime.timedelta (days=1)).strftime ('%Y-%m-%d')
nowtime = datetime.datetime.now ().strftime ('%Y-%m-%d %H:%M:%S.%f8')
time1 = '21:00:00.00000000'
time2 = '23:00:00.00000000'
flag_time1 = '{} {}'.format (today, time1)
flag_time2 = '{} {}'.format (today, time2)
pwd = os.path.dirname (os.path.abspath (__file__)) + os.sep
path = pwd + "env.sh"
sid = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 32))
sid_ck = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdefABCDEFGHIJKLMNOPQRSTUVWXYZ', 43))
def printT(s):
print ("[{0}]: {1}".format (datetime.datetime.now ().strftime ("%Y-%m-%d %H:%M:%S"), s))
sys.stdout.flush ()
def getEnvs(label):
try:
if label == 'True' or label == 'yes' or label == 'true' or label == 'Yes':
return True
elif label == 'False' or label == 'no' or label == 'false' or label == 'No':
return False
except:
pass
try:
if '.' in label:
return float (label)
elif '&' in label:
return label.split ('&')
elif '@' in label:
return label.split ('@')
else:
return int (label)
except:
return label
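# Illustrative getEnvs conversions (inputs below are hypothetical):
#   getEnvs('true') -> True        getEnvs('1.5') -> 1.5
#   getEnvs('a&b')  -> ['a', 'b']  getEnvs('7')   -> 7
#   anything unparseable falls through and is returned unchanged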
# Fetch the v4 environment (special handling); v4f is assumed to point to the v4 env file
try:
with open (v4f, 'r', encoding='utf-8') as v4f:
v4Env = v4f.read ()
r = re.compile (r'^export\s(.*?)=[\'\"]?([\w\.\-@#&=_,\[\]\{\}\(\)]{1,})+[\'\"]{0,1}$',
re.M | re.S | re.I)
r = r.findall (v4Env)
curenv = locals ()
for i in r:
if i[0] != 'JD_COOKIE':
curenv[i[0]] = getEnvs (i[1])
except:
pass
############# For testing the ql environment in PyCharm; in practice the code below is what runs #########
# with open(path, "r+", encoding="utf-8") as f:
# ck = f.read()
# if "JD_COOKIE" in ck:
# r = re.compile (r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
# cookies = r.findall (ck)
# # print(cookies)
# # cookies = cookies[0]
# # print(cookies)
# # cookies = cookies.split ('&')
# printT ("已获取并使用ck环境 Cookie")
#######################################################################
if "plant_cookie" in os.environ:
if len (os.environ["plant_cookie"]) == 1:
is_ck = int(os.environ["plant_cookie"])
cookie1 = os.environ["JD_COOKIE"].split('&')
cookie = cookie1[is_ck-1]
printT ("已获取并使用Env环境cookie")
elif len (os.environ["plant_cookie"]) > 1:
cookies1 = []
cookies1 = os.environ["JD_COOKIE"]
cookies1 = cookies1.split ('&')
is_ck = os.environ["plant_cookie"].split('&')
for i in is_ck:
cookies.append(cookies1[int(i)-1])
printT ("已获取并使用Env环境plant_cookies")
else:
printT ("变量plant_cookie未填写")
exit (0)
if "choose_plant_id" in os.environ:
choose_plant_id = os.environ["choose_plant_id"]
printT (f"已获取并使用Env环境choose_plant_id={choose_plant_id}")
else:
printT ("变量choose_plant_id未填写,默认为false只种植了一个,如果种植了多个,请填写改变量planted_id")
if "planted_id" in os.environ:
if len (os.environ["planted_id"]) > 8:
planted_ids = os.environ["planted_id"]
planted_ids = planted_ids.split ('&')
else:
planted_id = os.environ["planted_id"]
printT (f"已获取并使用Env环境planted_id={planted_id}")
else:
printT ("变量planted_id未填写,默认为false只种植了一个,如果种植了多个,请填写改变量planted_id")
if "beauty_plant_exchange" in os.environ:
beauty_plant_exchange = os.environ["beauty_plant_exchange"]
printT (f"已获取并使用Env环境beauty_plant_exchange={beauty_plant_exchange}")
else:
printT ("变量beauty_plant_exchange未填写,默认为false,不用美妆币兑换肥料")
def userAgent():
"""
    Randomly generate a User-Agent string
:return: jdapp;iPhone;9.4.8;14.3;xxxx;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1
"""
if not UserAgent:
uuid = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 40))
addressid = ''.join (random.sample ('1234567898647', 10))
iosVer = ''.join (
random.sample (["14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1", "13.7", "13.1.2", "13.1.1"], 1))
iosV = iosVer.replace ('.', '_')
iPhone = ''.join (random.sample (["8", "9", "10", "11", "12", "13"], 1))
ADID = ''.join (random.sample ('0987654321ABCDEF', 8)) + '-' + ''.join (
random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join (
random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join (
random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join (random.sample ('0987654321ABCDEF', 12))
return f'jdapp;iPhone;10.0.4;{iosVer};{uuid};network/wifi;ADID/{ADID};supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone{iPhone},1;addressid/{addressid};supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS {iosV} like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1'
else:
return UserAgent
## Fetch the notification service
class msg (object):
def __init__(self, m=''):
self.str_msg = m
self.message ()
def message(self):
global msg_info
printT (self.str_msg)
try:
msg_info = "{}\n{}".format (msg_info, self.str_msg)
except:
msg_info = "{}".format (self.str_msg)
        sys.stdout.flush ()  # this flushes the output buffer
        # When we print characters they are not emitted the moment print is called;
        # they are usually sent to a buffer first and printed later. That is a problem
        # if you want output at fixed intervals: an unfilled buffer never prints, so
        # one workaround is to force-flush the buffer after every print.
def getsendNotify(self, a=0):
if a == 0:
a += 1
try:
url = 'https://gitee.com/curtinlv/Public/raw/master/sendNotify.py'
response = requests.get (url)
if 'curtinlv' in response.text:
with open ('sendNotify.py', "w+", encoding="utf-8") as f:
f.write (response.text)
else:
if a < 5:
a += 1
return self.getsendNotify (a)
else:
pass
except:
if a < 5:
a += 1
return self.getsendNotify (a)
else:
pass
def main(self):
global send
cur_path = os.path.abspath (os.path.dirname (__file__))
sys.path.append (cur_path)
if os.path.exists (cur_path + "/sendNotify.py"):
try:
from sendNotify import send
except:
self.getsendNotify ()
try:
from sendNotify import send
except:
printT ("加载通知服务失败~")
else:
self.getsendNotify ()
try:
from sendNotify import send
except:
printT ("加载通知服务失败~")
###################
msg ().main ()
def setName(cookie):
try:
r = re.compile (r"pt_pin=(.*?);") # 指定一个规则:查找pt_pin=与;之前的所有字符,但pt_pin=与;不复制。r"" 的作用是去除转义字符.
userName = r.findall (cookie) # 查找pt_pin=与;之前的所有字符,并复制给r,其中pt_pin=与;不复制。
# print (userName)
userName = unquote (userName[0]) # r.findall(cookie)赋值是list列表,这个赋值为字符串
# print(userName)
return userName
except Exception as e:
print (e, "cookie格式有误!")
exit (2)
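# Illustrative only: what the pt_pin extraction in setName does.
#   demo_ck = "pt_key=AAA;pt_pin=test%40user;"
#   re.findall(r"pt_pin=(.*?);", demo_ck) -> ['test%40user']
#   unquote('test%40user') -> 'test@user'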
# Fetch the ck (isv token)
def get_ck(token, sid_ck, account):
try:
url = 'https://api.m.jd.com/client.action?functionId=isvObfuscator'
headers = {
# 'Connection': 'keep-alive',
'accept': '*/*',
"cookie": f"{token}",
'host': 'api.m.jd.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'user-Agent': "JD4iPhone/167922%20(iPhone;%20iOS;%20Scale/2.00)",
'accept-Encoding': 'gzip, deflate, br',
'accept-Language': 'zh-Hans-CN;q=1',
"content-type": "application/x-www-form-urlencoded",
# "content-length":"1348",
}
timestamp = int (round (time.time () * 1000))
timestamp1 = int (timestamp / 1000)
data = r'body=%7B%22url%22%3A%22https%3A%5C/%5C/xinruismzd-isv.isvjcloud.com%22%2C%22id%22%3A%22%22%7D&build=167922&client=apple&clientVersion=10.3.2&d_brand=apple&d_model=iPhone12%2C1&ef=1&eid=eidI4a9081236as4w7JpXa5zRZuwROIEo3ORpcOyassXhjPBIXtrtbjusqCxeW3E1fOtHUlGhZUCur1Q1iocDze1pQ9jBDGfQs8UXxMCTz02fk0RIHpB&ep=%7B%22ciphertype%22%3A5%2C%22cipher%22%3A%7B%22screen%22%3A%22ENS4AtO3EJS%3D%22%2C%22wifiBssid%22%3A%22' + f"{sid_ck}" + r'%3D%22%2C%22osVersion%22%3A%22CJUkCK%3D%3D%22%2C%22area%22%3A%22CJvpCJY1DV80ENY2XzK%3D%22%2C%22openudid%22%3A%22Ytq3YtKyDzO5CJuyZtu4CWSyZtC0Ytc1CJLsDwC5YwO0YtS5CNrsCK%3D%3D%22%2C%22uuid%22%3A%22aQf1ZRdxb2r4ovZ1EJZhcxYlVNZSZz09%22%7D%2C%22ts%22%3A1642002985%2C%22hdid%22%3A%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw%3D%22%2C%22version%22%3A%221.0.3%22%2C%22appname%22%3A%22com.360buy.jdmobile%22%2C%22ridx%22%3A-1%7D&ext=%7B%22prstate%22%3A%220%22%2C%22pvcStu%22%3A%221%22%7D&isBackground=N&joycious=88&lang=zh_CN&networkType=wifi&networklibtype=JDNetworkBaseAF&partner=apple&rfs=0000&scope=01&sign=946db60626658b250cf47aafb6f67691&st=1642002999847&sv=112&uemps=0-0&uts=0f31TVRjBSu3kkqwe7t25AkQCKuzV3pz8JrojVuU0630g%2BkZigs9kTwRghT26sE72/e92RRKan/%2B9SRjIJYCLuhew91djUwnIY47k31Rwne/U1fOHHr9FmR31X03JKJjwao/EC1gy4fj7PV1Co0ZOjiCMTscFo/8id2r8pCHYMZcaeH3yPTLq1MyFF3o3nkStM/993MbC9zim7imw8b1Fg%3D%3D'
# data = '{"token":"AAFh3ANjADAPSunyKSzXTA-UDxrs3Tn9hoy92x4sWmVB0Kv9ey-gAMEdJaSDWLWtnMX8lqLujBo","source":"01"}'
# print(data)
response = requests.post (url=url, verify=False, headers=headers, data=data)
result = response.json ()
# print(result)
access_token = result['token']
# print(access_token)
return access_token
except Exception as e:
msg ("账号【{0}】获取ck失败,cookie过期".format (account))
# Fetch the Authorization token
def get_Authorization(access_token, account):
try:
url = 'https://xinruimz-isv.isvjcloud.com/papi/auth'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": 'Bearer undefined',
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/logined_jd/',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Origin": "https://xinruimz-isv.isvjcloud.com",
"Content-Type": "application/json;charset=utf-8",
}
data = '{"token":"' + f"{access_token}" + r'","source":"01"}'
# print(data)
response = requests.post (url=url, verify=False, headers=headers, data=data)
result = response.json ()
print (result)
access_token = result['access_token']
access_token = r"Bearer " + access_token
# print(access_token)
return access_token
except Exception as e:
msg ("账号【{0}】获取Authorization失败,cookie过期".format (account))
# Fetch info about planted seeds
def get_planted_info(cookie, sid, account):
name_list = []
planted_id_list = []
position_list = []
shop_id_list = []
url = 'https://xinruimz-isv.isvjcloud.com/papi/get_home_info'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9'
}
response = requests.get (url=url, verify=False, headers=headers)
result = response.json ()
# print(result)
planted_list = result['plant_info']
# print(planted_list)
for i in range (len (planted_list)):
try:
name = result['plant_info'][f'{i + 1}']['data']['name']
planted_id = result['plant_info'][f'{i + 1}']['data']['id']
position = result['plant_info'][f'{i + 1}']['data']['position']
shop_id = result['plant_info'][f'{i + 1}']['data']['shop_id']
# print(name,planted_id,position,shop_id)
name_list.append (name)
planted_id_list.append (planted_id)
position_list.append (position)
shop_id_list.append (shop_id)
print (f"账号{account}种植的种子为", name, "planted_id:", planted_id, ",shop_id:", shop_id)
except Exception as e:
pass
return name_list, position_list, shop_id_list, planted_id_list
# Collect daily water drops
def get_water(cookie, position, sid, account):
try:
j = 0
url = 'https://xinruimz-isv.isvjcloud.com/papi/collect_water'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
for i in position:
data = r'{"position":' + f"{i}" + r'}'
response = requests.post (url=url, verify=False, headers=headers, data=data)
# print(response.status_code)
if response.status_code == 204:
j += 1
total = j * 10
if response.status_code == 204:
msg ("账号【{0}】成功领取每日水滴{1}".format (account, total))
except Exception as e:
msg ("账号【{0}】领取每日水滴失败,可能是cookie过期".format (account))
# Collect daily fertilizer
def get_fertilizer(cookie, shop_id, account):
try:
j = 0
url = 'https://xinruimz-isv.isvjcloud.com/papi/collect_fertilizer'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
for i in shop_id:
data = r'{"shop_id":' + f"{i}" + r'}'
response = requests.post (url=url, verify=False, headers=headers, data=data)
if response.status_code == 204:
j += 1
total = j * 10
if response.status_code == 204:
msg ("账号【{0}】成功领取每日肥料{1}".format (account, total))
except Exception as e:
msg ("账号【{0}】领取每日肥料失败,可能是cookie过期".format (account))
# Fetch task info (water tasks)
def get_task(cookie, account):
try:
taskName_list = []
taskId_list = []
taskName_list2 = []
taskId_list2 = []
taskName_list3 = []
taskId_list3 = []
url = 'https://xinruimz-isv.isvjcloud.com/papi/water_task_info'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
response = requests.get (url=url, verify=False, headers=headers)
result = response.json ()
# print(result)
task_list = result['shops']
task_list2 = result['meetingplaces']
        task_list3 = result['prodcuts']  # browse & add-to-cart tasks (note: 'prodcuts' is the API's own misspelled key)
# print(task_list)
for i in range (len (task_list)):
try:
taskName = task_list[i]['name']
taskId = task_list[i]['id']
taskId_list.append (taskId)
taskName_list.append (taskName)
except Exception as e:
print (e)
for i in range (len (task_list2)):
try:
taskName2 = task_list2[i]['name']
taskId2 = task_list2[i]['id']
taskId_list2.append (taskId2)
taskName_list2.append (taskName2)
except Exception as e:
print (e)
for i in range (len (task_list3)):
try:
taskName3 = task_list3[i]['name']
taskId3 = task_list3[i]['id']
taskId_list3.append (taskId3)
taskName_list3.append (taskName3)
except Exception as e:
print (e)
# print(taskName_list,taskId_list,taskName_list2,taskId_list2,taskName_list3,taskId_list3)
return taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3
except Exception as e:
print (e)
message = result['message']
if "非法店铺" in message:
msg ("【账号{0}】种子过期,请重新种植".format (account))
# Fetch task info (fertilizer tasks)
def get_fertilizer_task(cookie, shop_id, account):
try:
# taskName_list = []
# taskId_list = []
taskName_list2 = []
taskId_list2 = []
taskName_list3 = []
taskId_list3 = []
taskName_list4 = []
taskId_list4 = []
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_task_info?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
# "Content-Type": "application/json;charset=utf-8",
}
response = requests.get (url=url, verify=False, headers=headers)
result = response.json ()
# print(result)
# task_list = result['shops']
task_list2 = result['meetingplaces']
        task_list3 = result['prodcuts']  # browse & add-to-cart tasks (the API key itself is misspelled)
        task_list4 = result['live']  # browse livestream tasks
# print(task_list)
# for i in range (len (task_list)):
# try:
# taskName = task_list[i]['name']
# taskId = task_list[i]['id']
# taskId_list.append(taskId)
# taskName_list.append(taskName)
# except Exception as e:
# print(e)
for i in range (len (task_list2)):
try:
taskName2 = task_list2[i]['name']
taskId2 = task_list2[i]['id']
taskId_list2.append (taskId2)
taskName_list2.append (taskName2)
except Exception as e:
print (e)
for i in range (len (task_list3)):
try:
taskName3 = task_list3[i]['name']
taskId3 = task_list3[i]['id']
taskId_list3.append (taskId3)
taskName_list3.append (taskName3)
except Exception as e:
print (e)
for i in range (len (task_list4)):
try:
taskName4 = task_list4[i]['name']
taskId4 = task_list4[i]['id']
taskId_list4.append (taskId4)
taskName_list4.append (taskName4)
except Exception as e:
print (e)
# print(taskName_list,taskId_list,taskName_list2,taskId_list2,taskName_list3,taskId_list3)
return taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4
except Exception as e:
print (e)
message = result['message']
if "非法店铺" in message:
msg ("【账号{0}】种子过期,请重新种植".format (account))
# Do task 1: browse a shop
def do_task1(cookie, taskName, taskId, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/water_shop_view?shop_id={taskId}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
        response = requests.get (url=url, verify=False, headers=headers)  # data contains Chinese characters and must be encoded as utf-8
result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览任务【{1}】等待10秒".format (account, taskName))
msg ("账号【{0}】执行浏览任务【{1}】成功,获取【{2}】水滴".format (account, taskName, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
# Do the browse task
def do_task2(cookie, taskName, taskId, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/water_meetingplace_view?meetingplace_id={taskId}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
        response = requests.get (url=url, verify=False, headers=headers)  # data contains Chinese characters and must be encoded as utf-8
result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览任务【{1}】等待10秒".format (account, taskName))
msg ("账号【{0}】执行浏览任务【{1}】成功,获取【{2}】水滴".format (account, taskName, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
# Browse and add to cart
def do_task3(cookie, taskName, taskId, sid, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/water_product_view?product_id={taskId}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
# "Content-Type": "application/json;charset=utf-8",
}
        response = requests.get (url=url, verify=False, headers=headers)  # data contains Chinese characters and must be encoded as utf-8
result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览加购【{1}】等待10秒".format (account, taskName))
msg ("账号【{0}】执行浏览加购【{1}】成功,获取【{2}】水滴".format (account, taskName, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
# Fertilizer task: browse & follow
def do_fertilizer_task(cookie, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_shop_view?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
# "Content-Type": "application/json;charset=utf-8",
}
while True:
            response = requests.get (url=url, verify=False, headers=headers)  # data may contain Chinese characters; encode as utf-8
result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行【浏览关注】等待10秒".format (account))
msg ("账号【{0}】执行【浏览关注】任务成功,获取【{1}】肥料".format (account, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
# Fertilizer task - browse a meeting place
def do_fertilizer_task2(cookie, name, meetingplace_id, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_meetingplace_view?meetingplace_id={meetingplace_id}&shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
# "Content-Type": "application/json;charset=utf-8",
}
        response = requests.get (url=url, verify=False, headers=headers)  # data may contain Chinese characters; encode as utf-8
result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览关注{1}等待10秒".format (account, name))
msg ("账号【{0}】执行浏览关注{1}任务成功,获取【{2}】肥料".format (account, name, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
# Fertilizer task - browse and add a product to the cart
def do_fertilizer_task3(cookie, name, product_id, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_product_view?product_id={product_id}&shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
# "Content-Type": "application/json;charset=utf-8",
}
while True:
            response = requests.get (url=url, verify=False, headers=headers)  # data may contain Chinese characters; encode as utf-8
result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览并加购{1}等待10秒".format (account, name))
msg ("账号【{0}】执行浏览并加购{1}任务成功,获取【{2}】肥料".format (account, name, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
# Fertilizer task - view other samples
def do_fertilizer_task4(cookie, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_sample_view?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
# "Content-Type": "application/json;charset=utf-8",
}
        response = requests.get (url=url, verify=False, headers=headers)  # data may contain Chinese characters; encode as utf-8
result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行【观看其他小样】等待10秒".format (account))
msg ("账号【{0}】执行【观看其他小样】任务成功,获取【{1}】肥料".format (account, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
# Fertilizer task - browse the cosmetics hall
def do_fertilizer_task5(cookie, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_chanel_view?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
# "Content-Type": "application/json;charset=utf-8",
}
        response = requests.get (url=url, verify=False, headers=headers)  # data may contain Chinese characters; encode as utf-8
result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行【浏览化妆馆】等待10秒".format (account))
msg ("账号【{0}】执行【浏览化妆馆】任务成功,获取【{1}】肥料".format (account, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
# Fertilizer task - exchange beauty coins for fertilizer, 5 times a day
def do_fertilizer_task6(cookie, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_exchange?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
# "Content-Type": "application/json;charset=utf-8",
}
for i in range (5):
            response = requests.get (url=url, verify=False, headers=headers)  # data may contain Chinese characters; encode as utf-8
result = response.json ()
# print(result)
score = result['inc']
print ("账号【{0}】【shop_id:{1}】正在【兑换肥料】等待10秒".format (account, shop_id))
msg ("账号【{0}】【shop_id:{2}】执行【兑换肥料】任务成功,获取【{1}】肥料".format (account, score, shop_id))
time.sleep (10)
except Exception as e:
print (e)
msg ("账号【{0}】【shop_id:{1}】肥料兑换已达上限".format (account, shop_id))
time.sleep (1)
# Watering
def watering(cookie, plant_id, sid, account):
try:
url = 'https://xinruimz-isv.isvjcloud.com/papi/watering'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
data = r'{"plant_id":' + f"{plant_id}" + r'}'
while True:
response = requests.post (url=url, verify=False, headers=headers,
                                      data=data.encode ())  # data may contain Chinese characters; encode as utf-8
result = response.json ()
# print(result)
            level = result['level']  # current level
            complete_level = result['complete_level']  # level at which the seed matures
msg ("【账号{0}】【plant_id:{3}】成功浇水10g,当前等级{1},种子成熟等级为{2}".format (account, level, complete_level, plant_id))
time.sleep (5)
except Exception as e:
print(e)
# pass
# Fertilizing
def fertilization(cookie, plant_id, shop_id, account):
url = 'https://xinruimz-isv.isvjcloud.com/papi/fertilization'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
# 'User-Agent': 'jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1',
'User-Agent': userAgent (),
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
data = r'{"plant_id":' + f"{plant_id}" + r'}'
i = 1
while True:
try:
            response = requests.post (url=url, verify=False, headers=headers, data=data)  # data may contain Chinese characters; encode as utf-8
result = response.json ()
# print(result)
            level = result['level']  # current level
            complete_level = result['complete_level']  # level at which the seed matures
printT ("【账号{0}】【plant_id:{3}】成功施肥10g,当前等级{1},种子成熟等级为{2}".format (account, level, complete_level, plant_id))
time.sleep (5)
i += 1
except Exception as e:
# print(e)
message = result['message']
total = i * 10
if "肥料不足" in message:
msg("【账号{0}】【plant_id:{1}】本次一共施肥{2}g".format (account, plant_id,total))
printT ("【账号{0}】【plant_id:{1}】肥料不足10g".format (account, plant_id))
break
def start():
global cookie, cookies
print (f"\n【准备开始...】\n")
nowtime = datetime.datetime.now ().strftime ('%Y-%m-%d %H:%M:%S.%f8')
if cookie != '':
account = setName (cookie)
access_token = get_ck (cookie, sid_ck, account)
cookie = get_Authorization (access_token, account)
name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account)
taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3 = get_task (cookie,account)
get_water (cookie, position_list, sid, account)
get_fertilizer (cookie, shop_id_list, account)
for i, j in zip (taskName_list, taskId_list):
do_task1 (cookie, i, j, account)
for i, j in zip (taskName_list2, taskId_list2):
do_task2 (cookie, i, j, account)
for i, j in zip (taskName_list3, taskId_list3):
do_task3 (cookie, i, j, sid, account)
flag = 0
for i in shop_id_list:
            do_fertilizer_task (cookie, i, account)  # browse and follow
for k in shop_id_list:
taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4 = get_fertilizer_task (cookie, k, account)
do_fertilizer_task4 (cookie, k, account)
do_fertilizer_task5 (cookie, k, account)
if beauty_plant_exchange == 'true':
do_fertilizer_task6 (cookie, k, account)
for i, j in zip (taskName_list2, taskId_list2):
print (i, j, k)
                do_fertilizer_task2 (cookie, i, j, k, account)  # browse
for i, j in zip (taskName_list3, taskId_list3):
print (i, j, k)
                do_fertilizer_task3 (cookie, i, j, k, account)  # add to cart
if choose_plant_id == 'false':
for i in planted_id_list:
watering (cookie, i, sid, account)
fertilization (cookie, i, k, account)
else:
fertilization (cookie, planted_id_list[flag], k, account)
watering (cookie, planted_id, sid, account)
flag += 1
    elif cookies:
for cookie, planted_id in zip (cookies, planted_ids):
try:
account = setName (cookie)
access_token = get_ck (cookie, sid_ck, account)
cookie = get_Authorization (access_token, account)
name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account)
except Exception as e:
pass
for cookie, planted_id in zip (cookies, planted_ids):
try:
account = setName (cookie)
access_token = get_ck (cookie, sid_ck, account)
cookie = get_Authorization (access_token, account)
name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account)
taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3 = get_task (cookie, account)
get_water (cookie, position_list, sid, account)
get_fertilizer (cookie, shop_id_list, account)
for i, j in zip (taskName_list, taskId_list):
do_task1 (cookie, i, j, account)
for i, j in zip (taskName_list2, taskId_list2):
do_task2 (cookie, i, j, account)
for i, j in zip (taskName_list3, taskId_list3):
do_task3 (cookie, i, j, sid, account)
flag = 0
for i in shop_id_list:
                    do_fertilizer_task (cookie, i, account)  # browse and follow
for k in shop_id_list:
taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4 = get_fertilizer_task (
cookie, k, account)
do_fertilizer_task4 (cookie, k, account)
do_fertilizer_task5 (cookie, k, account)
if beauty_plant_exchange == 'true':
do_fertilizer_task6 (cookie, k, account)
for i, j in zip (taskName_list2, taskId_list2):
print (i, j, k)
                        do_fertilizer_task2 (cookie, i, j, k, account)  # browse
for i, j in zip (taskName_list3, taskId_list3):
print (i, j, k)
                        do_fertilizer_task3 (cookie, i, j, k, account)  # add to cart
if choose_plant_id == 'false':
for i in planted_id_list:
fertilization (cookie, i, k, account)
watering (cookie, i, sid, account)
else:
print("【账号{}现在开始施肥】".format(account))
fertilization (cookie, planted_id_list[flag], k, account)
print ("【账号{}现在开始浇水】".format (account))
watering (cookie, planted_id, sid, account)
flag += 1
except Exception as e:
pass
else:
printT ("请检查变量plant_cookie是否已填写")
if __name__ == '__main__':
printT ("美丽研究院-种植园")
start ()
# if '成熟' in msg_info:
# send ("美丽研究院-种植园", msg_info)
if '成功' in msg_info:
send ("美丽研究院-种植园", msg_info)
| 48.197802
| 1,343
| 0.613046
|
UserAgent = ''
account = ''
cookie = ''
cookies = []
choose_plant_id = 'false'
planted_id = ''
shop_id = ''
beauty_plant_exchange = 'false'
planted_ids = []
import requests
import time, datetime
import requests, re, os, sys, random, json
from urllib.parse import quote, unquote
import threading
import urllib3
requests.packages.urllib3.disable_warnings ()
today = datetime.datetime.now ().strftime ('%Y-%m-%d')
tomorrow = (datetime.datetime.now () + datetime.timedelta (days=1)).strftime ('%Y-%m-%d')
nowtime = datetime.datetime.now ().strftime ('%Y-%m-%d %H:%M:%S.%f8')
time1 = '21:00:00.00000000'
time2 = '23:00:00.00000000'
flag_time1 = '{} {}'.format (today, time1)
flag_time2 = '{} {}'.format (today, time2)
pwd = os.path.dirname (os.path.abspath (__file__)) + os.sep
path = pwd + "env.sh"
sid = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 32))
sid_ck = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdefABCDEFGHIJKLMNOPQRSTUVWXYZ', 43))
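# Print a message prefixed with the current timestamp.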
def printT(s):
print ("[{0}]: {1}".format (datetime.datetime.now ().strftime ("%Y-%m-%d %H:%M:%S"), s))
sys.stdout.flush ()
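# Coerce an env-var string into bool, float, int, or an '&'/'@'-separated list.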
def getEnvs(label):
try:
if label == 'True' or label == 'yes' or label == 'true' or label == 'Yes':
return True
elif label == 'False' or label == 'no' or label == 'false' or label == 'No':
return False
except:
pass
try:
if '.' in label:
return float (label)
elif '&' in label:
return label.split ('&')
elif '@' in label:
return label.split ('@')
else:
return int (label)
except:
return label
try:
    with open (path, 'r', encoding='utf-8') as v4f:
v4Env = v4f.read ()
r = re.compile (r'^export\s(.*?)=[\'\"]?([\w\.\-@#&=_,\[\]\{\}\(\)]{1,})+[\'\"]{0,1}$',
re.M | re.S | re.I)
r = r.findall (v4Env)
curenv = locals ()
for i in r:
if i[0] != 'JD_COOKIE':
curenv[i[0]] = getEnvs (i[1])
except:
pass
if "plant_cookie" in os.environ:
if len (os.environ["plant_cookie"]) == 1:
is_ck = int(os.environ["plant_cookie"])
cookie1 = os.environ["JD_COOKIE"].split('&')
cookie = cookie1[is_ck-1]
printT ("已获取并使用Env环境cookie")
elif len (os.environ["plant_cookie"]) > 1:
cookies1 = []
cookies1 = os.environ["JD_COOKIE"]
cookies1 = cookies1.split ('&')
is_ck = os.environ["plant_cookie"].split('&')
for i in is_ck:
cookies.append(cookies1[int(i)-1])
printT ("已获取并使用Env环境plant_cookies")
else:
printT ("变量plant_cookie未填写")
exit (0)
if "choose_plant_id" in os.environ:
choose_plant_id = os.environ["choose_plant_id"]
printT (f"已获取并使用Env环境choose_plant_id={choose_plant_id}")
else:
printT ("变量choose_plant_id未填写,默认为false只种植了一个,如果种植了多个,请填写改变量planted_id")
if "planted_id" in os.environ:
if len (os.environ["planted_id"]) > 8:
planted_ids = os.environ["planted_id"]
planted_ids = planted_ids.split ('&')
else:
planted_id = os.environ["planted_id"]
printT (f"已获取并使用Env环境planted_id={planted_id}")
else:
printT ("变量planted_id未填写,默认为false只种植了一个,如果种植了多个,请填写改变量planted_id")
if "beauty_plant_exchange" in os.environ:
beauty_plant_exchange = os.environ["beauty_plant_exchange"]
printT (f"已获取并使用Env环境beauty_plant_exchange={beauty_plant_exchange}")
else:
printT ("变量beauty_plant_exchange未填写,默认为false,不用美妆币兑换肥料")
def userAgent():
if not UserAgent:
uuid = ''.join (random.sample ('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 40))
addressid = ''.join (random.sample ('1234567898647', 10))
iosVer = ''.join (
random.sample (["14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1", "13.7", "13.1.2", "13.1.1"], 1))
iosV = iosVer.replace ('.', '_')
iPhone = ''.join (random.sample (["8", "9", "10", "11", "12", "13"], 1))
ADID = ''.join (random.sample ('0987654321ABCDEF', 8)) + '-' + ''.join (
random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join (
random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join (
random.sample ('0987654321ABCDEF', 4)) + '-' + ''.join (random.sample ('0987654321ABCDEF', 12))
return f'jdapp;iPhone;10.0.4;{iosVer};{uuid};network/wifi;ADID/{ADID};supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone{iPhone},1;addressid/{addressid};supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS {iosV} like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1'
else:
return UserAgent
class msg (object):
def __init__(self, m=''):
self.str_msg = m
self.message ()
def message(self):
global msg_info
printT (self.str_msg)
try:
msg_info = "{}\n{}".format (msg_info, self.str_msg)
except:
msg_info = "{}".format (self.str_msg)
sys.stdout.flush ()
def getsendNotify(self, a=0):
if a == 0:
a += 1
try:
url = 'https://gitee.com/curtinlv/Public/raw/master/sendNotify.py'
response = requests.get (url)
if 'curtinlv' in response.text:
with open ('sendNotify.py', "w+", encoding="utf-8") as f:
f.write (response.text)
else:
if a < 5:
a += 1
return self.getsendNotify (a)
else:
pass
except:
if a < 5:
a += 1
return self.getsendNotify (a)
else:
pass
def main(self):
global send
cur_path = os.path.abspath (os.path.dirname (__file__))
sys.path.append (cur_path)
if os.path.exists (cur_path + "/sendNotify.py"):
try:
from sendNotify import send
except:
self.getsendNotify ()
try:
from sendNotify import send
except:
printT ("加载通知服务失败~")
else:
self.getsendNotify ()
try:
from sendNotify import send
except:
printT ("加载通知服务失败~")
msg ().main ()
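# Extract the pt_pin account name from a JD cookie.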
def setName(cookie):
try:
        r = re.compile (r"pt_pin=(.*?);")
        userName = r.findall (cookie)
        userName = unquote (userName[0])
        return userName
except Exception as e:
print (e, "cookie格式有误!")
exit (2)
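# Exchange the JD cookie for an ISV token via the isvObfuscator endpoint.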
def get_ck(token, sid_ck, account):
try:
url = 'https://api.m.jd.com/client.action?functionId=isvObfuscator'
headers = {
'accept': '*/*',
"cookie": f"{token}",
'host': 'api.m.jd.com',
'user-Agent': "JD4iPhone/167922%20(iPhone;%20iOS;%20Scale/2.00)",
'accept-Encoding': 'gzip, deflate, br',
'accept-Language': 'zh-Hans-CN;q=1',
"content-type": "application/x-www-form-urlencoded",
}
timestamp = int (round (time.time () * 1000))
timestamp1 = int (timestamp / 1000)
data = r'body=%7B%22url%22%3A%22https%3A%5C/%5C/xinruismzd-isv.isvjcloud.com%22%2C%22id%22%3A%22%22%7D&build=167922&client=apple&clientVersion=10.3.2&d_brand=apple&d_model=iPhone12%2C1&ef=1&eid=eidI4a9081236as4w7JpXa5zRZuwROIEo3ORpcOyassXhjPBIXtrtbjusqCxeW3E1fOtHUlGhZUCur1Q1iocDze1pQ9jBDGfQs8UXxMCTz02fk0RIHpB&ep=%7B%22ciphertype%22%3A5%2C%22cipher%22%3A%7B%22screen%22%3A%22ENS4AtO3EJS%3D%22%2C%22wifiBssid%22%3A%22' + f"{sid_ck}" + r'%3D%22%2C%22osVersion%22%3A%22CJUkCK%3D%3D%22%2C%22area%22%3A%22CJvpCJY1DV80ENY2XzK%3D%22%2C%22openudid%22%3A%22Ytq3YtKyDzO5CJuyZtu4CWSyZtC0Ytc1CJLsDwC5YwO0YtS5CNrsCK%3D%3D%22%2C%22uuid%22%3A%22aQf1ZRdxb2r4ovZ1EJZhcxYlVNZSZz09%22%7D%2C%22ts%22%3A1642002985%2C%22hdid%22%3A%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw%3D%22%2C%22version%22%3A%221.0.3%22%2C%22appname%22%3A%22com.360buy.jdmobile%22%2C%22ridx%22%3A-1%7D&ext=%7B%22prstate%22%3A%220%22%2C%22pvcStu%22%3A%221%22%7D&isBackground=N&joycious=88&lang=zh_CN&networkType=wifi&networklibtype=JDNetworkBaseAF&partner=apple&rfs=0000&scope=01&sign=946db60626658b250cf47aafb6f67691&st=1642002999847&sv=112&uemps=0-0&uts=0f31TVRjBSu3kkqwe7t25AkQCKuzV3pz8JrojVuU0630g%2BkZigs9kTwRghT26sE72/e92RRKan/%2B9SRjIJYCLuhew91djUwnIY47k31Rwne/U1fOHHr9FmR31X03JKJjwao/EC1gy4fj7PV1Co0ZOjiCMTscFo/8id2r8pCHYMZcaeH3yPTLq1MyFF3o3nkStM/993MbC9zim7imw8b1Fg%3D%3D'
response = requests.post (url=url, verify=False, headers=headers, data=data)
result = response.json ()
access_token = result['token']
return access_token
except Exception as e:
msg ("账号【{0}】获取ck失败,cookie过期".format (account))
def get_Authorization(access_token, account):
try:
url = 'https://xinruimz-isv.isvjcloud.com/papi/auth'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": 'Bearer undefined',
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/logined_jd/',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Origin": "https://xinruimz-isv.isvjcloud.com",
"Content-Type": "application/json;charset=utf-8",
}
data = '{"token":"' + f"{access_token}" + r'","source":"01"}'
response = requests.post (url=url, verify=False, headers=headers, data=data)
result = response.json ()
print (result)
access_token = result['access_token']
access_token = r"Bearer " + access_token
return access_token
except Exception as e:
msg ("账号【{0}】获取Authorization失败,cookie过期".format (account))
def get_planted_info(cookie, sid, account):
name_list = []
planted_id_list = []
position_list = []
shop_id_list = []
url = 'https://xinruimz-isv.isvjcloud.com/papi/get_home_info'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9'
}
response = requests.get (url=url, verify=False, headers=headers)
result = response.json ()
planted_list = result['plant_info']
for i in range (len (planted_list)):
try:
name = result['plant_info'][f'{i + 1}']['data']['name']
planted_id = result['plant_info'][f'{i + 1}']['data']['id']
position = result['plant_info'][f'{i + 1}']['data']['position']
shop_id = result['plant_info'][f'{i + 1}']['data']['shop_id']
name_list.append (name)
planted_id_list.append (planted_id)
position_list.append (position)
shop_id_list.append (shop_id)
print (f"账号{account}种植的种子为", name, "planted_id:", planted_id, ",shop_id:", shop_id)
except Exception as e:
pass
return name_list, position_list, shop_id_list, planted_id_list
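# Collect the daily free water drops for every plant position.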
def get_water(cookie, position, sid, account):
try:
j = 0
url = 'https://xinruimz-isv.isvjcloud.com/papi/collect_water'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
for i in position:
data = r'{"position":' + f"{i}" + r'}'
response = requests.post (url=url, verify=False, headers=headers, data=data)
if response.status_code == 204:
j += 1
total = j * 10
if response.status_code == 204:
msg ("账号【{0}】成功领取每日水滴{1}".format (account, total))
except Exception as e:
msg ("账号【{0}】领取每日水滴失败,可能是cookie过期".format (account))
def get_fertilizer(cookie, shop_id, account):
try:
j = 0
url = 'https://xinruimz-isv.isvjcloud.com/papi/collect_fertilizer'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
for i in shop_id:
data = r'{"shop_id":' + f"{i}" + r'}'
response = requests.post (url=url, verify=False, headers=headers, data=data)
if response.status_code == 204:
j += 1
total = j * 10
if response.status_code == 204:
msg ("账号【{0}】成功领取每日肥料{1}".format (account, total))
except Exception as e:
msg ("账号【{0}】领取每日肥料失败,可能是cookie过期".format (account))
def get_task(cookie, account):
try:
taskName_list = []
taskId_list = []
taskName_list2 = []
taskId_list2 = []
taskName_list3 = []
taskId_list3 = []
url = 'https://xinruimz-isv.isvjcloud.com/papi/water_task_info'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
response = requests.get (url=url, verify=False, headers=headers)
result = response.json ()
task_list = result['shops']
task_list2 = result['meetingplaces']
        task_list3 = result['prodcuts']
        for i in range (len (task_list)):
try:
taskName = task_list[i]['name']
taskId = task_list[i]['id']
taskId_list.append (taskId)
taskName_list.append (taskName)
except Exception as e:
print (e)
for i in range (len (task_list2)):
try:
taskName2 = task_list2[i]['name']
taskId2 = task_list2[i]['id']
taskId_list2.append (taskId2)
taskName_list2.append (taskName2)
except Exception as e:
print (e)
for i in range (len (task_list3)):
try:
taskName3 = task_list3[i]['name']
taskId3 = task_list3[i]['id']
taskId_list3.append (taskId3)
taskName_list3.append (taskName3)
except Exception as e:
print (e)
return taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3
except Exception as e:
print (e)
message = result['message']
if "非法店铺" in message:
msg ("【账号{0}】种子过期,请重新种植".format (account))
def get_fertilizer_task(cookie, shop_id, account):
try:
taskName_list2 = []
taskId_list2 = []
taskName_list3 = []
taskId_list3 = []
taskName_list4 = []
taskId_list4 = []
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_task_info?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
}
response = requests.get (url=url, verify=False, headers=headers)
result = response.json ()
task_list2 = result['meetingplaces']
        task_list3 = result['prodcuts']
        task_list4 = result['live']
        for i in range (len (task_list2)):
try:
taskName2 = task_list2[i]['name']
taskId2 = task_list2[i]['id']
taskId_list2.append (taskId2)
taskName_list2.append (taskName2)
except Exception as e:
print (e)
for i in range (len (task_list3)):
try:
taskName3 = task_list3[i]['name']
taskId3 = task_list3[i]['id']
taskId_list3.append (taskId3)
taskName_list3.append (taskName3)
except Exception as e:
print (e)
for i in range (len (task_list4)):
try:
taskName4 = task_list4[i]['name']
taskId4 = task_list4[i]['id']
taskId_list4.append (taskId4)
taskName_list4.append (taskName4)
except Exception as e:
print (e)
return taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4
except Exception as e:
print (e)
message = result['message']
if "非法店铺" in message:
msg ("【账号{0}】种子过期,请重新种植".format (account))
def do_task1(cookie, taskName, taskId, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/water_shop_view?shop_id={taskId}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
        response = requests.get (url=url, verify=False, headers=headers)
        result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览任务【{1}】等待10秒".format (account, taskName))
msg ("账号【{0}】执行浏览任务【{1}】成功,获取【{2}】水滴".format (account, taskName, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
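# Water task: browse a meeting place.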
def do_task2(cookie, taskName, taskId, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/water_meetingplace_view?meetingplace_id={taskId}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': 'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id=12&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
        response = requests.get (url=url, verify=False, headers=headers)
        result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览任务【{1}】等待10秒".format (account, taskName))
msg ("账号【{0}】执行浏览任务【{1}】成功,获取【{2}】水滴".format (account, taskName, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
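# Water task: browse a product and add it to the cart.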
def do_task3(cookie, taskName, taskId, sid, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/water_product_view?product_id={taskId}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
}
        response = requests.get (url=url, verify=False, headers=headers)
        result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览加购【{1}】等待10秒".format (account, taskName))
msg ("账号【{0}】执行浏览加购【{1}】成功,获取【{2}】水滴".format (account, taskName, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
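# Fertilizer task: browse and follow a shop.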
def do_fertilizer_task(cookie, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_shop_view?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
}
while True:
            response = requests.get (url=url, verify=False, headers=headers)
            result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行【浏览关注】等待10秒".format (account))
msg ("账号【{0}】执行【浏览关注】任务成功,获取【{1}】肥料".format (account, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
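# Fertilizer task: browse a meeting place.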
def do_fertilizer_task2(cookie, name, meetingplace_id, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_meetingplace_view?meetingplace_id={meetingplace_id}&shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
}
        response = requests.get (url=url, verify=False, headers=headers)
        result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览关注{1}等待10秒".format (account, name))
msg ("账号【{0}】执行浏览关注{1}任务成功,获取【{2}】肥料".format (account, name, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
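# Fertilizer task: browse a product and add it to the cart.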
def do_fertilizer_task3(cookie, name, product_id, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_product_view?product_id={product_id}&shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
}
while True:
            response = requests.get (url=url, verify=False, headers=headers)
            result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行浏览并加购{1}等待10秒".format (account, name))
msg ("账号【{0}】执行浏览并加购{1}任务成功,获取【{2}】肥料".format (account, name, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
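# Fertilizer task: view other samples.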
def do_fertilizer_task4(cookie, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_sample_view?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
}
        response = requests.get (url=url, verify=False, headers=headers)
        result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行【观看其他小样】等待10秒".format (account))
msg ("账号【{0}】执行【观看其他小样】任务成功,获取【{1}】肥料".format (account, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
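# Fertilizer task: browse the cosmetics hall.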
def do_fertilizer_task5(cookie, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_chanel_view?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
}
        response = requests.get (url=url, verify=False, headers=headers)
        result = response.json ()
print (result)
score = result['inc']
print ("账号【{0}】执行【浏览化妆馆】等待10秒".format (account))
msg ("账号【{0}】执行【浏览化妆馆】任务成功,获取【{1}】肥料".format (account, score))
time.sleep (10)
except Exception as e:
print (e)
time.sleep (1)
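# Fertilizer task: exchange beauty coins for fertilizer, up to 5 times a day.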
def do_fertilizer_task6(cookie, shop_id, account):
try:
url = f'https://xinruimz-isv.isvjcloud.com/papi/fertilizer_exchange?shop_id={shop_id}'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
}
for i in range (5):
            response = requests.get (url=url, verify=False, headers=headers)
            result = response.json ()
score = result['inc']
print ("账号【{0}】【shop_id:{1}】正在【兑换肥料】等待10秒".format (account, shop_id))
msg ("账号【{0}】【shop_id:{2}】执行【兑换肥料】任务成功,获取【{1}】肥料".format (account, score, shop_id))
time.sleep (10)
except Exception as e:
print (e)
msg ("账号【{0}】【shop_id:{1}】肥料兑换已达上限".format (account, shop_id))
time.sleep (1)
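# Water the plant 10g at a time until the water runs out.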
def watering(cookie, plant_id, sid, account):
try:
url = 'https://xinruimz-isv.isvjcloud.com/papi/watering'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/?sid={sid}&un_area=19_1655_4866_0',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
data = r'{"plant_id":' + f"{plant_id}" + r'}'
while True:
response = requests.post (url=url, verify=False, headers=headers,
                                      data=data.encode ())
            result = response.json ()
            level = result['level']
            complete_level = result['complete_level']
            msg ("【账号{0}】【plant_id:{3}】成功浇水10g,当前等级{1},种子成熟等级为{2}".format (account, level, complete_level, plant_id))
time.sleep (5)
except Exception as e:
print(e)
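# Apply fertilizer 10g at a time until the server reports it has run out.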
def fertilization(cookie, plant_id, shop_id, account):
url = 'https://xinruimz-isv.isvjcloud.com/papi/fertilization'
headers = {
'Connection': 'keep-alive',
'Accept': 'application/x.jd-school-raffle.v1+json',
"Authorization": cookie,
'Referer': f'https://xinruimz-isv.isvjcloud.com/plantation/shop_index/?shop_id={shop_id}&channel=index',
'Host': 'xinruimz-isv.isvjcloud.com',
'User-Agent': userAgent (),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
"Content-Type": "application/json;charset=utf-8",
}
data = r'{"plant_id":' + f"{plant_id}" + r'}'
i = 1
while True:
try:
            response = requests.post (url=url, verify=False, headers=headers, data=data)
            result = response.json ()
            level = result['level']
            complete_level = result['complete_level']
            printT ("【账号{0}】【plant_id:{3}】成功施肥10g,当前等级{1},种子成熟等级为{2}".format (account, level, complete_level, plant_id))
time.sleep (5)
i += 1
except Exception as e:
message = result['message']
total = i * 10
if "肥料不足" in message:
msg("【账号{0}】【plant_id:{1}】本次一共施肥{2}g".format (account, plant_id,total))
printT ("【账号{0}】【plant_id:{1}】肥料不足10g".format (account, plant_id))
break
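# Main flow: authenticate, collect daily resources, run all tasks, then water and fertilize.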
def start():
global cookie, cookies
print (f"\n【准备开始...】\n")
nowtime = datetime.datetime.now ().strftime ('%Y-%m-%d %H:%M:%S.%f8')
if cookie != '':
account = setName (cookie)
access_token = get_ck (cookie, sid_ck, account)
cookie = get_Authorization (access_token, account)
name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account)
taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3 = get_task (cookie,account)
get_water (cookie, position_list, sid, account)
get_fertilizer (cookie, shop_id_list, account)
for i, j in zip (taskName_list, taskId_list):
do_task1 (cookie, i, j, account)
for i, j in zip (taskName_list2, taskId_list2):
do_task2 (cookie, i, j, account)
for i, j in zip (taskName_list3, taskId_list3):
do_task3 (cookie, i, j, sid, account)
flag = 0
for i in shop_id_list:
            do_fertilizer_task (cookie, i, account)
        for k in shop_id_list:
taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4 = get_fertilizer_task (cookie, k, account)
do_fertilizer_task4 (cookie, k, account)
do_fertilizer_task5 (cookie, k, account)
if beauty_plant_exchange == 'true':
do_fertilizer_task6 (cookie, k, account)
for i, j in zip (taskName_list2, taskId_list2):
print (i, j, k)
                do_fertilizer_task2 (cookie, i, j, k, account)
            for i, j in zip (taskName_list3, taskId_list3):
print (i, j, k)
do_fertilizer_task3 (cookie, i, j, k, account)
if choose_plant_id == 'false':
for i in planted_id_list:
watering (cookie, i, sid, account)
fertilization (cookie, i, k, account)
else:
fertilization (cookie, planted_id_list[flag], k, account)
watering (cookie, planted_id, sid, account)
flag += 1
    elif cookies:
for cookie, planted_id in zip (cookies, planted_ids):
try:
account = setName (cookie)
access_token = get_ck (cookie, sid_ck, account)
cookie = get_Authorization (access_token, account)
name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account)
except Exception as e:
pass
for cookie, planted_id in zip (cookies, planted_ids):
try:
account = setName (cookie)
access_token = get_ck (cookie, sid_ck, account)
cookie = get_Authorization (access_token, account)
name_list, position_list, shop_id_list, planted_id_list = get_planted_info (cookie, sid, account)
taskName_list, taskId_list, taskName_list2, taskId_list2, taskName_list3, taskId_list3 = get_task (cookie, account)
get_water (cookie, position_list, sid, account)
get_fertilizer (cookie, shop_id_list, account)
for i, j in zip (taskName_list, taskId_list):
do_task1 (cookie, i, j, account)
for i, j in zip (taskName_list2, taskId_list2):
do_task2 (cookie, i, j, account)
for i, j in zip (taskName_list3, taskId_list3):
do_task3 (cookie, i, j, sid, account)
flag = 0
for i in shop_id_list:
                    do_fertilizer_task (cookie, i, account)
                for k in shop_id_list:
taskName_list2, taskId_list2, taskName_list3, taskId_list3, taskName_list4, taskId_list4 = get_fertilizer_task (
cookie, k, account)
do_fertilizer_task4 (cookie, k, account)
do_fertilizer_task5 (cookie, k, account)
if beauty_plant_exchange == 'true':
do_fertilizer_task6 (cookie, k, account)
for i, j in zip (taskName_list2, taskId_list2):
print (i, j, k)
                        do_fertilizer_task2 (cookie, i, j, k, account)
                    for i, j in zip (taskName_list3, taskId_list3):
print (i, j, k)
do_fertilizer_task3 (cookie, i, j, k, account)
if choose_plant_id == 'false':
for i in planted_id_list:
fertilization (cookie, i, k, account)
watering (cookie, i, sid, account)
else:
print("【账号{}现在开始施肥】".format(account))
fertilization (cookie, planted_id_list[flag], k, account)
print ("【账号{}现在开始浇水】".format (account))
watering (cookie, planted_id, sid, account)
flag += 1
except Exception as e:
pass
else:
printT ("请检查变量plant_cookie是否已填写")
if __name__ == '__main__':
printT ("美丽研究院-种植园")
start ()
if '成功' in msg_info:
send ("美丽研究院-种植园", msg_info)
| true
| true
|
f701647c2b297015f025eb53bd191a1a8c54ec62
| 18,209
|
py
|
Python
|
tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 522
|
2016-06-08T02:15:50.000Z
|
2022-03-02T05:30:36.000Z
|
tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 48
|
2016-07-26T00:11:55.000Z
|
2022-02-23T13:36:33.000Z
|
tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 108
|
2016-06-16T15:34:05.000Z
|
2022-03-12T13:23:11.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.layers.sparse_feature_cross."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseCrossOpTest(test.TestCase):
def test_simple(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_dense(self):
"""Tests only dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_sparse(self):
"""Tests mixed type."""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([[11], [333, 55555]]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',
'55555_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_dense(self):
"""Tests mixed dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
'999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_sparse_cross_dense(self):
"""Tests sparse and dense inputs.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_sparse_input(self):
"""Tests mixed type sparse and dense inputs."""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([[11], [333, 5555]]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
'5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x3x3(self):
"""Tests 3x3x3 permutation.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor(
[['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
self._sparse_tensor(
[['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x1x2(self):
"""Tests 3x1x2 permutation.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_large_batch(self):
"""Tests with large batch size to force multithreding.
"""
batch_size = 5000
col1 = []
col2 = []
col3 = []
for b in range(batch_size):
col1.append(
['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
col2.append(['batch%d-FC2-F1' % b])
col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(col1), self._sparse_tensor(col2),
self._sparse_tensor(col3)
])
col_out = []
for b in range(batch_size):
col_out.append([
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
])
expected_out = self._sparse_tensor(col_out)
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_one_column_empty(self):
"""Tests when one column is empty.
The crossed tensor should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
self._sparse_tensor([], 1),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
with self.test_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_some_columns_empty(self):
"""Tests when more than one columns are empty.
Cross for the corresponding batch should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
]], 2)
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_all_columns_empty(self):
"""Tests when all columns are empty.
The crossed tensor should be empty.
"""
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([]), self._sparse_tensor([]),
self._sparse_tensor([])
])
with self.test_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_hashed_output_zero_bucket(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[3735511728867393167]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_zero_bucket_v2(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[1971693436396284976]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
# TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.
def test_hashed_output(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
num_buckets=100)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[74]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_v2(self):
"""Tests a simple scenario.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
num_buckets=100,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[83]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_v1_has_collision(self):
"""Tests the old version of the fingerprint concatenation has collisions.
"""
# The last 10 bits of 359 and 1024+359 are identical.
# As a result, all the crosses collide.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_feature_cross_op.sparse_feature_cross(
[t2, t1], hashed_output=True, num_buckets=1024)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.equal(values[0], values[1]).all())
def test_hashed_output_v2_has_no_collision(self):
"""Tests the new version of the fingerprint concatenation has no collisions.
"""
    # Although the last 10 bits of 359 and 1024+359 are identical, with the new
    # fingerprint function the crosses should not collide.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_feature_cross_op.sparse_feature_cross(
[t2, t1],
hashed_output=True,
num_buckets=1024,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
def test_hashed_3x1x2(self):
"""Tests 3x1x2 permutation with hashed output.
"""
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
],
hashed_output=True,
num_buckets=1000)
with self.test_session() as sess:
out = sess.run(op)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def _assert_sparse_tensor_empty(self, sp):
    self.assertEqual(0, sp.indices.size)
    self.assertEqual(0, sp.values.size)
    # TODO(zakaria): check if we can ignore the first dim of the shape.
    self.assertEqual(0, sp.dense_shape[1])
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices.eval(), sp2.indices)
self.assertAllEqual(sp1.values.eval(), sp2.values)
self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
def _sparse_tensor(self, data, batch_size=-1):
"""Generates a SparseTensor.
Args:
data: Should be a list of list of strings or int64. Each item of the outer
list represents a batch. Each item of the batch is a feature of a
specific feature column.
batch_size: optional batch size, especially for cases when data has no
entry for some batches.
Returns:
A SparseTensor.
"""
indices = []
values = []
max_col_count = 0
for batch, batch_ix in zip(data, range(len(data))):
for column, column_ix in zip(batch, range(len(batch))):
indices.append([batch_ix, column_ix])
values.append(column)
max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (dtypes.string if not values or isinstance(values[0], str) else
dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
if __name__ == '__main__':
test.main()
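# A minimal sketch (added illustration, not part of the original test file) of
# the string cross these tests expect for one batch row; `string_cross` is a
# hypothetical helper, only itertools is assumed.
import itertools

def string_cross(*columns):
  # Cartesian product across the feature columns, joined the way the op
  # formats its string output.
  return ['_X_'.join(combo) for combo in itertools.product(*columns)]

# string_cross(['batch1-FC1-F1'], ['batch1-FC2-F1', 'batch1-FC2-F2'])
# -> ['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2']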
| 41.573059
| 80
| 0.649953
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseCrossOpTest(test.TestCase):
def test_simple(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_dense(self):
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_sparse(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([[11], [333, 55555]]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',
'55555_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_dense(self):
op = sparse_feature_cross_op.sparse_feature_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
'999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_sparse_cross_dense(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_sparse_input(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([[11], [333, 5555]]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
'5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x3x3(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor(
[['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
self._sparse_tensor(
[['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x1x2(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_large_batch(self):
batch_size = 5000
col1 = []
col2 = []
col3 = []
for b in range(batch_size):
col1.append(
['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
col2.append(['batch%d-FC2-F1' % b])
col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor(col1), self._sparse_tensor(col2),
self._sparse_tensor(col3)
])
col_out = []
for b in range(batch_size):
col_out.append([
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
])
expected_out = self._sparse_tensor(col_out)
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_one_column_empty(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
self._sparse_tensor([], 1),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
with self.test_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_some_columns_empty(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
]], 2)
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_all_columns_empty(self):
op = sparse_feature_cross_op.sparse_feature_cross([
self._sparse_tensor([]), self._sparse_tensor([]),
self._sparse_tensor([])
])
with self.test_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_hashed_output_zero_bucket(self):
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True)
expected_out = self._sparse_tensor([[3735511728867393167]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_zero_bucket_v2(self):
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
expected_out = self._sparse_tensor([[1971693436396284976]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output(self):
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
num_buckets=100)
expected_out = self._sparse_tensor([[74]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_v2(self):
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hashed_output=True,
num_buckets=100,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
expected_out = self._sparse_tensor([[83]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output_v1_has_collision(self):
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_feature_cross_op.sparse_feature_cross(
[t2, t1], hashed_output=True, num_buckets=1024)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.equal(values[0], values[1]).all())
def test_hashed_output_v2_has_no_collision(self):
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_feature_cross_op.sparse_feature_cross(
[t2, t1],
hashed_output=True,
num_buckets=1024,
hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
def test_hashed_3x1x2(self):
op = sparse_feature_cross_op.sparse_feature_cross(
[
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
],
hashed_output=True,
num_buckets=1000)
with self.test_session() as sess:
out = sess.run(op)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def _assert_sparse_tensor_empty(self, sp):
self.assertEquals(0, sp.indices.size)
self.assertEquals(0, sp.values.size)
# TODO(zakaria): check if we can ignore the first dim of the shape.
self.assertEquals(0, sp.dense_shape[1])
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices.eval(), sp2.indices)
self.assertAllEqual(sp1.values.eval(), sp2.values)
self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
def _sparse_tensor(self, data, batch_size=-1):
indices = []
values = []
max_col_count = 0
for batch, batch_ix in zip(data, range(len(data))):
for column, column_ix in zip(batch, range(len(batch))):
indices.append([batch_ix, column_ix])
values.append(column)
max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (dtypes.string if not values or isinstance(values[0], str) else
dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
if __name__ == '__main__':
test.main()
| true
| true
|
f70164867ee92c37dd1f5df7f9995e1f24766eff
| 4,105
|
py
|
Python
|
pyunity/scenes/runner.py
|
rayzchen/PyUnity
|
8ed436eca7a84f05190c1fa275c58da5c6059926
|
[
"MIT"
] | null | null | null |
pyunity/scenes/runner.py
|
rayzchen/PyUnity
|
8ed436eca7a84f05190c1fa275c58da5c6059926
|
[
"MIT"
] | null | null | null |
pyunity/scenes/runner.py
|
rayzchen/PyUnity
|
8ed436eca7a84f05190c1fa275c58da5c6059926
|
[
"MIT"
] | null | null | null |
__all__ = ["ChangeScene", "Runner", "WindowRunner", "NonInteractiveRunner", "newRunner"]
from .. import config, render, Logger
from ..events import EventLoopManager, WaitForUpdate, WaitForFixedUpdate, WaitForRender
from ..errors import PyUnityException
import copy
import os
class ChangeScene(Exception):
pass
class Runner:
def __init__(self):
self.scene = None
self.next = None
self.opened = False
def setScene(self, scene):
if self.opened:
raise PyUnityException("Cannot set scene after opening runner")
self.scene = copy.deepcopy(scene)
def setNext(self, scene):
if self.scene is None:
raise PyUnityException("Cannot set next before first scene")
self.next = copy.deepcopy(scene)
raise ChangeScene
def open(self):
if self.scene is None:
raise PyUnityException("Cannot open runner before setting a scene")
if self.opened:
Logger.Save()
self.opened = True
def setup(self):
pass
def load(self):
if self.scene is None:
raise PyUnityException("Cannot load runner before setting a scene")
Logger.LogLine(Logger.DEBUG, "Starting scene")
self.eventLoopManager = EventLoopManager()
self.eventLoopManager.schedule(self.scene.updateFixed, ups=50, waitFor=WaitForFixedUpdate)
self.eventLoopManager.addLoop(self.scene.startScripts())
def start(self):
while True:
try:
self.eventLoopManager.start()
break
except ChangeScene:
if self.next is None:
raise
self.eventLoopManager.quit()
self.scene.cleanUp()
self.scene = self.next
self.next = None
self.load()
def quit(self):
self.eventLoopManager.quit()
self.scene.cleanUp()
self.scene = None
self.opened = False
class WindowRunner(Runner):
def open(self):
super(WindowRunner, self).open()
os.environ["PYUNITY_GL_CONTEXT"] = "1"
self.window = config.windowProvider(self.scene.name)
# front buffer
self.window.refresh()
render.fillScreen()
# back buffer
self.window.refresh()
render.fillScreen()
def setup(self):
Logger.LogSpecial(Logger.INFO, Logger.ELAPSED_TIME)
Logger.LogLine(Logger.DEBUG, "Compiling objects")
Logger.LogLine(Logger.INFO, "Compiling shaders")
render.compileShaders()
Logger.LogSpecial(Logger.INFO, Logger.ELAPSED_TIME)
Logger.LogLine(Logger.INFO, "Loading skyboxes")
render.compileSkyboxes()
Logger.LogSpecial(Logger.INFO, Logger.ELAPSED_TIME)
def load(self):
super(WindowRunner, self).load()
self.eventLoopManager.schedule(
self.scene.updateScripts, self.window.updateFunc,
ups=config.fps, waitFor=WaitForUpdate)
self.eventLoopManager.schedule(
self.window.refresh, self.scene.Render,
main=True, waitFor=WaitForRender)
if self.scene.mainCamera is not None:
self.window.setResize(self.scene.mainCamera.Resize)
self.scene.startOpenGL()
self.scene.startLoop()
def start(self):
super(WindowRunner, self).start()
def quit(self):
super(WindowRunner, self).quit()
del self.window
del os.environ["PYUNITY_GL_CONTEXT"]
render.resetShaders()
Logger.LogLine(Logger.INFO, "Reset shaders")
render.resetSkyboxes()
Logger.LogLine(Logger.INFO, "Reset skyboxes")
class NonInteractiveRunner(Runner):
def load(self):
super(NonInteractiveRunner, self).load()
self.eventLoopManager.schedule(
self.scene.updateScripts,
ups=config.fps, waitFor=WaitForUpdate)
self.scene.startLoop()
def newRunner():
if os.environ["PYUNITY_INTERACTIVE"] == "1":
return WindowRunner()
else:
return NonInteractiveRunner()
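# Hedged usage sketch (added illustration, not part of the original module);
# `scene` is assumed to be a loaded pyunity Scene:
#
#     runner = newRunner()
#     runner.setScene(scene)   # deep-copies the scene
#     runner.open()            # creates the window when interactive
#     runner.setup()           # compiles shaders and skyboxes (WindowRunner)
#     runner.load()            # schedules the update/render loops
#     runner.start()           # blocks; ChangeScene from setNext swaps scenes
#     runner.quit()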
| 31.098485
| 98
| 0.625335
|
__all__ = ["ChangeScene", "Runner", "WindowRunner", "NonInteractiveRunner", "newRunner"]
from .. import config, render, Logger
from ..events import EventLoopManager, WaitForUpdate, WaitForFixedUpdate, WaitForRender
from ..errors import PyUnityException
import copy
import os
class ChangeScene(Exception):
pass
class Runner:
def __init__(self):
self.scene = None
self.next = None
self.opened = False
def setScene(self, scene):
if self.opened:
raise PyUnityException("Cannot set scene after opening runner")
self.scene = copy.deepcopy(scene)
def setNext(self, scene):
if self.scene is None:
raise PyUnityException("Cannot set next before first scene")
self.next = copy.deepcopy(scene)
raise ChangeScene
def open(self):
if self.scene is None:
raise PyUnityException("Cannot open runner before setting a scene")
if self.opened:
Logger.Save()
self.opened = True
def setup(self):
pass
def load(self):
if self.scene is None:
raise PyUnityException("Cannot load runner before setting a scene")
Logger.LogLine(Logger.DEBUG, "Starting scene")
self.eventLoopManager = EventLoopManager()
self.eventLoopManager.schedule(self.scene.updateFixed, ups=50, waitFor=WaitForFixedUpdate)
self.eventLoopManager.addLoop(self.scene.startScripts())
def start(self):
while True:
try:
self.eventLoopManager.start()
break
except ChangeScene:
if self.next is None:
raise
self.eventLoopManager.quit()
self.scene.cleanUp()
self.scene = self.next
self.next = None
self.load()
def quit(self):
self.eventLoopManager.quit()
self.scene.cleanUp()
self.scene = None
self.opened = False
class WindowRunner(Runner):
def open(self):
super(WindowRunner, self).open()
os.environ["PYUNITY_GL_CONTEXT"] = "1"
self.window = config.windowProvider(self.scene.name)
self.window.refresh()
render.fillScreen()
self.window.refresh()
render.fillScreen()
def setup(self):
Logger.LogSpecial(Logger.INFO, Logger.ELAPSED_TIME)
Logger.LogLine(Logger.DEBUG, "Compiling objects")
Logger.LogLine(Logger.INFO, "Compiling shaders")
render.compileShaders()
Logger.LogSpecial(Logger.INFO, Logger.ELAPSED_TIME)
Logger.LogLine(Logger.INFO, "Loading skyboxes")
render.compileSkyboxes()
Logger.LogSpecial(Logger.INFO, Logger.ELAPSED_TIME)
def load(self):
super(WindowRunner, self).load()
self.eventLoopManager.schedule(
self.scene.updateScripts, self.window.updateFunc,
ups=config.fps, waitFor=WaitForUpdate)
self.eventLoopManager.schedule(
self.window.refresh, self.scene.Render,
main=True, waitFor=WaitForRender)
if self.scene.mainCamera is not None:
self.window.setResize(self.scene.mainCamera.Resize)
self.scene.startOpenGL()
self.scene.startLoop()
def start(self):
super(WindowRunner, self).start()
def quit(self):
super(WindowRunner, self).quit()
del self.window
del os.environ["PYUNITY_GL_CONTEXT"]
render.resetShaders()
Logger.LogLine(Logger.INFO, "Reset shaders")
render.resetSkyboxes()
Logger.LogLine(Logger.INFO, "Reset skyboxes")
class NonInteractiveRunner(Runner):
def load(self):
super(NonInteractiveRunner, self).load()
self.eventLoopManager.schedule(
self.scene.updateScripts,
ups=config.fps, waitFor=WaitForUpdate)
self.scene.startLoop()
def newRunner():
if os.environ["PYUNITY_INTERACTIVE"] == "1":
return WindowRunner()
else:
return NonInteractiveRunner()
| true
| true
|
f7016532b37838aa17bbc7b1b1e0324a6363051f
| 2,571
|
py
|
Python
|
docs/_ext/rapidsmsdocs.py
|
datamade/rapidsms
|
052766eca9d5a79ccc4d1d3b5956c1c34c0d99cb
|
[
"BSD-3-Clause"
] | null | null | null |
docs/_ext/rapidsmsdocs.py
|
datamade/rapidsms
|
052766eca9d5a79ccc4d1d3b5956c1c34c0d99cb
|
[
"BSD-3-Clause"
] | 2
|
2018-08-03T18:48:09.000Z
|
2019-01-02T19:33:23.000Z
|
docs/_ext/rapidsmsdocs.py
|
datamade/rapidsms
|
052766eca9d5a79ccc4d1d3b5956c1c34c0d99cb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Sphinx plugins for RapidSMS documentation.
"""
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
json = None
from sphinx import addnodes, roles
from docutils.parsers.rst import Directive
def setup(app):
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "templatetag",
rolename = "ttag",
indextemplate = "pair: %s; template tag"
)
app.add_crossref_type(
directivename = "templatefilter",
rolename = "tfilter",
indextemplate = "pair: %s; template filter"
)
app.add_crossref_type(
directivename = "router",
rolename = "router",
indextemplate = "pair: %s; router",
)
app.add_config_value('rapidsms_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
env = self.state.document.settings.env
arg0 = self.arguments[0]
is_nextversion = env.config.rapidsms_next_version == arg0
ret = []
node = addnodes.versionmodified()
ret.append(node)
if not is_nextversion:
if len(self.arguments) == 1:
                linktext = 'Please see the release notes </releases/%s>' % arg0
xrefs = roles.XRefRole()('doc', linktext, linktext,
self.lineno, self.state)
node.extend(xrefs[0])
node['version'] = arg0
else:
node['version'] = "Development version"
node['type'] = self.name
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1],
self.lineno+1)
node.extend(inodes)
if self.content:
self.state.nested_parse(self.content, self.content_offset,
node)
ret = ret + messages
env.note_versionchange(node['type'], node['version'], node,
self.lineno)
return ret
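# Hedged usage sketch (added illustration, not part of the original plugin):
# with the crossref types registered above, the docs can write
#
#   .. setting:: DEBUG
#
# and reference it elsewhere with :setting:`DEBUG`; `versionadded` and
# `versionchanged` directives for any version other than
# `rapidsms_next_version` render a link to the matching /releases/<version>
# page, while the next version renders as "Development version".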
| 31.740741
| 82
| 0.56515
|
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
json = None
from sphinx import addnodes, roles
from docutils.parsers.rst import Directive
def setup(app):
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "templatetag",
rolename = "ttag",
indextemplate = "pair: %s; template tag"
)
app.add_crossref_type(
directivename = "templatefilter",
rolename = "tfilter",
indextemplate = "pair: %s; template filter"
)
app.add_crossref_type(
directivename = "router",
rolename = "router",
indextemplate = "pair: %s; router",
)
app.add_config_value('rapidsms_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
env = self.state.document.settings.env
arg0 = self.arguments[0]
is_nextversion = env.config.rapidsms_next_version == arg0
ret = []
node = addnodes.versionmodified()
ret.append(node)
if not is_nextversion:
if len(self.arguments) == 1:
                linktext = 'Please see the release notes </releases/%s>' % arg0
xrefs = roles.XRefRole()('doc', linktext, linktext,
self.lineno, self.state)
node.extend(xrefs[0])
node['version'] = arg0
else:
node['version'] = "Development version"
node['type'] = self.name
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1],
self.lineno+1)
node.extend(inodes)
if self.content:
self.state.nested_parse(self.content, self.content_offset,
node)
ret = ret + messages
env.note_versionchange(node['type'], node['version'], node,
self.lineno)
return ret
| true
| true
|
f70165831b1a0ae798b12f7c6cfd6eaade682b3b
| 1,528
|
py
|
Python
|
src/11.py
|
vulpicastor/advent-of-code-2021
|
12aaf84091604caf88acf3b4f7a118d866c33f5f
|
[
"MIT"
] | null | null | null |
src/11.py
|
vulpicastor/advent-of-code-2021
|
12aaf84091604caf88acf3b4f7a118d866c33f5f
|
[
"MIT"
] | null | null | null |
src/11.py
|
vulpicastor/advent-of-code-2021
|
12aaf84091604caf88acf3b4f7a118d866c33f5f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# pylint: disable=unused-import
import collections
import functools
import io
import itertools
import operator as op
import re
import timeit
import numpy as np
import aocd
YEAR = 2021
DAY = 11
def step(grid):
grid += 1
flash = np.zeros_like(grid, dtype=bool)
while np.any(grid[~flash] > 9):
new_flash = (grid > 9) ^ flash
grid[:-1, :-1] += new_flash[1:, 1:]
grid[:-1, :] += new_flash[1:, :]
grid[:-1, 1:] += new_flash[1:, :-1]
grid[:, :-1] += new_flash[:, 1:]
grid[:, 1:] += new_flash[:, :-1]
grid[1:, :-1] += new_flash[:-1, 1:]
grid[1:, :] += new_flash[:-1, :]
grid[1:, 1:] += new_flash[:-1, :-1]
flash |= new_flash
grid[flash] = 0
return flash
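# Illustrative trace (added note, not from the original solution): on the
# 2x2 grid [[9, 1], [1, 1]] a step first bumps every cell to [[10, 2], [2, 2]],
# the 10 flashes and adds +1 to each of its three neighbours, and the flashed
# cell resets, leaving [[0, 3], [3, 3]] with step() returning one True flash.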
def main():
data = """5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526"""
data = aocd.get_data(day=DAY, year=YEAR)
inlist = np.array([list(map(int, l)) for l in data.split('\n')])
print(inlist)
grid = inlist.copy()
num_flashes = 0
for i in range(100):
num_flashes += np.sum(step(grid))
print(num_flashes)
answer = num_flashes
aocd.submit(answer, part='a', day=DAY, year=YEAR)
grid = inlist.copy()
for i in itertools.count(1):
flash = step(grid)
if np.all(flash):
answer = i
break
print(answer)
aocd.submit(answer, part='b', day=DAY, year=YEAR)
if __name__ == '__main__':
main()
| 20.931507
| 68
| 0.578534
|
import collections
import functools
import io
import itertools
import operator as op
import re
import timeit
import numpy as np
import aocd
YEAR = 2021
DAY = 11
def step(grid):
grid += 1
flash = np.zeros_like(grid, dtype=bool)
while np.any(grid[~flash] > 9):
new_flash = (grid > 9) ^ flash
grid[:-1, :-1] += new_flash[1:, 1:]
grid[:-1, :] += new_flash[1:, :]
grid[:-1, 1:] += new_flash[1:, :-1]
grid[:, :-1] += new_flash[:, 1:]
grid[:, 1:] += new_flash[:, :-1]
grid[1:, :-1] += new_flash[:-1, 1:]
grid[1:, :] += new_flash[:-1, :]
grid[1:, 1:] += new_flash[:-1, :-1]
flash |= new_flash
grid[flash] = 0
return flash
def main():
data = """5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526"""
data = aocd.get_data(day=DAY, year=YEAR)
inlist = np.array([list(map(int, l)) for l in data.split('\n')])
print(inlist)
grid = inlist.copy()
num_flashes = 0
for i in range(100):
num_flashes += np.sum(step(grid))
print(num_flashes)
answer = num_flashes
aocd.submit(answer, part='a', day=DAY, year=YEAR)
grid = inlist.copy()
for i in itertools.count(1):
flash = step(grid)
if np.all(flash):
answer = i
break
print(answer)
aocd.submit(answer, part='b', day=DAY, year=YEAR)
if __name__ == '__main__':
main()
| true
| true
|
f70165a3a17fd920644947b39c3f7192a33d8c30
| 5,864
|
py
|
Python
|
tests/rest/test_connection.py
|
lforesta/openeo-python-client
|
d6cfc17f9693f6f6bf4a2ce60eac180ee9576543
|
[
"Apache-2.0"
] | null | null | null |
tests/rest/test_connection.py
|
lforesta/openeo-python-client
|
d6cfc17f9693f6f6bf4a2ce60eac180ee9576543
|
[
"Apache-2.0"
] | null | null | null |
tests/rest/test_connection.py
|
lforesta/openeo-python-client
|
d6cfc17f9693f6f6bf4a2ce60eac180ee9576543
|
[
"Apache-2.0"
] | null | null | null |
import unittest.mock as mock
import pytest
import requests_mock
from openeo.rest.auth.auth import NullAuth, BearerAuth
from openeo.rest.connection import Connection, RestApiConnection, connect, OpenEoApiError
API_URL = "https://oeo.net/"
@pytest.mark.parametrize(
["base", "paths", "expected_path"],
[
# Simple
("https://oeo.net", ["foo", "/foo"], "https://oeo.net/foo"),
("https://oeo.net/", ["foo", "/foo"], "https://oeo.net/foo"),
# With trailing slash
("https://oeo.net", ["foo/", "/foo/"], "https://oeo.net/foo/"),
("https://oeo.net/", ["foo/", "/foo/"], "https://oeo.net/foo/"),
# Deeper
("https://oeo.net/api/v04", ["foo/bar", "/foo/bar"], "https://oeo.net/api/v04/foo/bar"),
("https://oeo.net/api/v04/", ["foo/bar", "/foo/bar"], "https://oeo.net/api/v04/foo/bar"),
("https://oeo.net/api/v04", ["foo/bar/", "/foo/bar/"], "https://oeo.net/api/v04/foo/bar/"),
("https://oeo.net/api/v04/", ["foo/bar/", "/foo/bar/"], "https://oeo.net/api/v04/foo/bar/"),
]
)
def test_rest_api_connection_url_handling(requests_mock, base, paths, expected_path):
"""Test connection __init__ and proper joining of root url and API path"""
conn = RestApiConnection(base)
requests_mock.get(expected_path, text="payload")
requests_mock.post(expected_path, text="payload")
for path in paths:
assert conn.get(path).text == "payload"
assert conn.post(path, {"foo": "bar"}).text == "payload"
def test_rest_api_headers():
conn = RestApiConnection(API_URL)
with requests_mock.Mocker() as m:
def text(request, context):
assert request.headers["User-Agent"].startswith("openeo-python-client")
assert request.headers["X-Openeo-Bar"] == "XY123"
m.get("/foo", text=text)
m.post("/foo", text=text)
conn.get("/foo", headers={"X-Openeo-Bar": "XY123"})
conn.post("/foo", {}, headers={"X-Openeo-Bar": "XY123"})
def test_connection_with_session():
session = mock.Mock()
response = session.request.return_value
response.status_code = 200
response.json.return_value = {"foo": "bar"}
conn = Connection("https://oeo.net/", session=session)
assert conn.capabilities().capabilities == {"foo": "bar"}
session.request.assert_any_call(
url="https://oeo.net/", method="get", headers=mock.ANY, stream=mock.ANY, auth=mock.ANY
)
def test_connect_with_session():
session = mock.Mock()
response = session.request.return_value
response.status_code = 200
response.json.return_value = {"foo": "bar"}
conn = connect("https://oeo.net/", session=session)
assert conn.capabilities().capabilities == {"foo": "bar"}
session.request.assert_any_call(
url="https://oeo.net/", method="get", headers=mock.ANY, stream=mock.ANY, auth=mock.ANY
)
def test_api_error(requests_mock):
conn = Connection(API_URL)
requests_mock.get('https://oeo.net/collections/foobar', status_code=404, json={
"code": "CollectionNotFound", "message": "No such things as a collection 'foobar'", "id": "54321"
})
with pytest.raises(OpenEoApiError) as exc_info:
conn.describe_collection("foobar")
exc = exc_info.value
assert exc.http_status_code == 404
assert exc.code == "CollectionNotFound"
assert exc.message == "No such things as a collection 'foobar'"
assert exc.id == "54321"
assert exc.url is None
def test_api_error_non_json(requests_mock):
conn = Connection(API_URL)
requests_mock.get('https://oeo.net/collections/foobar', status_code=500, text="olapola")
with pytest.raises(OpenEoApiError) as exc_info:
conn.describe_collection("foobar")
exc = exc_info.value
assert exc.http_status_code == 500
assert exc.code == "unknown"
assert exc.message == "olapola"
assert exc.id is None
assert exc.url is None
def test_authenticate_basic(requests_mock):
conn = Connection(API_URL)
def text_callback(request, context):
assert request.headers["Authorization"] == "Basic am9objpqMGhu"
return '{"access_token":"w3lc0m3"}'
requests_mock.get('https://oeo.net/credentials/basic', text=text_callback)
assert isinstance(conn.auth, NullAuth)
conn.authenticate_basic(username="john", password="j0hn")
assert isinstance(conn.auth, BearerAuth)
assert conn.auth.bearer == "w3lc0m3"
def test_authenticate_oidc(oidc_test_setup):
# see test/rest/conftest.py for `oidc_test_setup` fixture
client_id = "myclient"
oidc_discovery_url = "https://oeo.net/credentials/oidc"
state, webbrowser_open = oidc_test_setup(client_id=client_id, oidc_discovery_url=oidc_discovery_url)
# With all this set up, kick off the openid connect flow
conn = Connection(API_URL)
assert isinstance(conn.auth, NullAuth)
conn.authenticate_OIDC(client_id=client_id, webbrowser_open=webbrowser_open)
assert isinstance(conn.auth, BearerAuth)
assert conn.auth.bearer == state["access_token"]
def test_load_collection_arguments(requests_mock):
conn = Connection(API_URL)
requests_mock.get(API_URL, json={"version": "0.4.0"})
requests_mock.get(API_URL + "collections/FOO", json={
"properties": {"eo:bands": [{"name": "red"}, {"name": "green"}, {"name": "blue"}]}
})
spatial_extent = {"west": 1, "south": 2, "east": 3, "north": 4}
temporal_extent = ["2019-01-01", "2019-01-22"]
im = conn.load_collection(
"FOO", spatial_extent=spatial_extent, temporal_extent=temporal_extent, bands=["red", "green"]
)
node = im.graph[im.node_id]
assert node["process_id"] == "load_collection"
assert node["arguments"] == {
"id": "FOO",
"spatial_extent": spatial_extent,
"temporal_extent": temporal_extent,
"bands": ["red", "green"]
}
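# A minimal sketch (illustrative, not the client's actual implementation) of
# the base/path joining behaviour exercised by the parametrized test above;
# `join_url` is a hypothetical helper.
def join_url(base: str, path: str) -> str:
    # Collapse the slash between root URL and API path, keeping any trailing
    # slash on the path itself.
    return base.rstrip("/") + "/" + path.lstrip("/")

# join_url("https://oeo.net/api/v04/", "/foo/bar/") -> "https://oeo.net/api/v04/foo/bar/"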
| 38.834437
| 105
| 0.657913
|
import unittest.mock as mock
import pytest
import requests_mock
from openeo.rest.auth.auth import NullAuth, BearerAuth
from openeo.rest.connection import Connection, RestApiConnection, connect, OpenEoApiError
API_URL = "https://oeo.net/"
@pytest.mark.parametrize(
["base", "paths", "expected_path"],
[
("https://oeo.net", ["foo", "/foo"], "https://oeo.net/foo"),
("https://oeo.net/", ["foo", "/foo"], "https://oeo.net/foo"),
("https://oeo.net", ["foo/", "/foo/"], "https://oeo.net/foo/"),
("https://oeo.net/", ["foo/", "/foo/"], "https://oeo.net/foo/"),
("https://oeo.net/api/v04", ["foo/bar", "/foo/bar"], "https://oeo.net/api/v04/foo/bar"),
("https://oeo.net/api/v04/", ["foo/bar", "/foo/bar"], "https://oeo.net/api/v04/foo/bar"),
("https://oeo.net/api/v04", ["foo/bar/", "/foo/bar/"], "https://oeo.net/api/v04/foo/bar/"),
("https://oeo.net/api/v04/", ["foo/bar/", "/foo/bar/"], "https://oeo.net/api/v04/foo/bar/"),
]
)
def test_rest_api_connection_url_handling(requests_mock, base, paths, expected_path):
conn = RestApiConnection(base)
requests_mock.get(expected_path, text="payload")
requests_mock.post(expected_path, text="payload")
for path in paths:
assert conn.get(path).text == "payload"
assert conn.post(path, {"foo": "bar"}).text == "payload"
def test_rest_api_headers():
conn = RestApiConnection(API_URL)
with requests_mock.Mocker() as m:
def text(request, context):
assert request.headers["User-Agent"].startswith("openeo-python-client")
assert request.headers["X-Openeo-Bar"] == "XY123"
m.get("/foo", text=text)
m.post("/foo", text=text)
conn.get("/foo", headers={"X-Openeo-Bar": "XY123"})
conn.post("/foo", {}, headers={"X-Openeo-Bar": "XY123"})
def test_connection_with_session():
session = mock.Mock()
response = session.request.return_value
response.status_code = 200
response.json.return_value = {"foo": "bar"}
conn = Connection("https://oeo.net/", session=session)
assert conn.capabilities().capabilities == {"foo": "bar"}
session.request.assert_any_call(
url="https://oeo.net/", method="get", headers=mock.ANY, stream=mock.ANY, auth=mock.ANY
)
def test_connect_with_session():
session = mock.Mock()
response = session.request.return_value
response.status_code = 200
response.json.return_value = {"foo": "bar"}
conn = connect("https://oeo.net/", session=session)
assert conn.capabilities().capabilities == {"foo": "bar"}
session.request.assert_any_call(
url="https://oeo.net/", method="get", headers=mock.ANY, stream=mock.ANY, auth=mock.ANY
)
def test_api_error(requests_mock):
conn = Connection(API_URL)
requests_mock.get('https://oeo.net/collections/foobar', status_code=404, json={
"code": "CollectionNotFound", "message": "No such things as a collection 'foobar'", "id": "54321"
})
with pytest.raises(OpenEoApiError) as exc_info:
conn.describe_collection("foobar")
exc = exc_info.value
assert exc.http_status_code == 404
assert exc.code == "CollectionNotFound"
assert exc.message == "No such things as a collection 'foobar'"
assert exc.id == "54321"
assert exc.url is None
def test_api_error_non_json(requests_mock):
conn = Connection(API_URL)
requests_mock.get('https://oeo.net/collections/foobar', status_code=500, text="olapola")
with pytest.raises(OpenEoApiError) as exc_info:
conn.describe_collection("foobar")
exc = exc_info.value
assert exc.http_status_code == 500
assert exc.code == "unknown"
assert exc.message == "olapola"
assert exc.id is None
assert exc.url is None
def test_authenticate_basic(requests_mock):
conn = Connection(API_URL)
def text_callback(request, context):
assert request.headers["Authorization"] == "Basic am9objpqMGhu"
return '{"access_token":"w3lc0m3"}'
requests_mock.get('https://oeo.net/credentials/basic', text=text_callback)
assert isinstance(conn.auth, NullAuth)
conn.authenticate_basic(username="john", password="j0hn")
assert isinstance(conn.auth, BearerAuth)
assert conn.auth.bearer == "w3lc0m3"
def test_authenticate_oidc(oidc_test_setup):
client_id = "myclient"
oidc_discovery_url = "https://oeo.net/credentials/oidc"
state, webbrowser_open = oidc_test_setup(client_id=client_id, oidc_discovery_url=oidc_discovery_url)
conn = Connection(API_URL)
assert isinstance(conn.auth, NullAuth)
conn.authenticate_OIDC(client_id=client_id, webbrowser_open=webbrowser_open)
assert isinstance(conn.auth, BearerAuth)
assert conn.auth.bearer == state["access_token"]
def test_load_collection_arguments(requests_mock):
conn = Connection(API_URL)
requests_mock.get(API_URL, json={"version": "0.4.0"})
requests_mock.get(API_URL + "collections/FOO", json={
"properties": {"eo:bands": [{"name": "red"}, {"name": "green"}, {"name": "blue"}]}
})
spatial_extent = {"west": 1, "south": 2, "east": 3, "north": 4}
temporal_extent = ["2019-01-01", "2019-01-22"]
im = conn.load_collection(
"FOO", spatial_extent=spatial_extent, temporal_extent=temporal_extent, bands=["red", "green"]
)
node = im.graph[im.node_id]
assert node["process_id"] == "load_collection"
assert node["arguments"] == {
"id": "FOO",
"spatial_extent": spatial_extent,
"temporal_extent": temporal_extent,
"bands": ["red", "green"]
}
| true
| true
|
f701678d9565d7ea82674dde93f7d7ccdabe6c52
| 10,194
|
py
|
Python
|
yufuquantsdk/clients.py
|
We-Hack-Studio/nuts-sdk
|
54ea379156b9f3c36a3ec05ba3d161b16ef43fc4
|
[
"MIT"
] | 1
|
2021-04-11T03:48:17.000Z
|
2021-04-11T03:48:17.000Z
|
yufuquantsdk/clients.py
|
We-Hack-Studio/nuts-sdk
|
54ea379156b9f3c36a3ec05ba3d161b16ef43fc4
|
[
"MIT"
] | null | null | null |
yufuquantsdk/clients.py
|
We-Hack-Studio/nuts-sdk
|
54ea379156b9f3c36a3ec05ba3d161b16ef43fc4
|
[
"MIT"
] | 3
|
2021-04-17T22:52:05.000Z
|
2022-01-05T01:30:57.000Z
|
import asyncio
import json
import logging
from datetime import datetime
from typing import Any, Dict, Iterable, List, Optional, Set, Union
import httpx
import websockets
from websockets import exceptions
logger = logging.getLogger("yufuquantsdk")
class WebsocketAPIClient:
def __init__(self, uri: str, ws: websockets.WebSocketClientProtocol = None) -> None:
self._uri: str = uri
self._ws: websockets.WebSocketClientProtocol = ws
self._authed: bool = False
self._api_key = ""
self._sub_topics: Set[str] = set()
self._inputs: asyncio.Queue[str] = asyncio.Queue()
self._outputs: asyncio.Queue[str] = asyncio.Queue(maxsize=100)
self._run_task: asyncio.Task[Any] = asyncio.get_event_loop().create_task(
self._run()
)
async def auth(self, api_key: str):
message = {
"cmd": "auth",
"api_key": api_key,
}
await self._deliver(json.dumps(message))
self._authed = True
self._api_key = api_key
async def sub(self, topics: Iterable[str]):
# Remove duplicated topics
if not isinstance(topics, set):
topics = set(topics)
message = {
"cmd": "sub",
"topics": list(topics), # Object of type set is not JSON serializable
}
await self._deliver(json.dumps(message))
self._sub_topics = topics
async def unsub(self, topics: Iterable[str]):
# Remove duplicated topics
if not isinstance(topics, set):
topics = set(topics)
message = {
"cmd": "unsub",
"topics": list(topics),
}
await self._deliver(json.dumps(message))
self._sub_topics = self._sub_topics - topics
async def robot_ping(self):
data = {"timestamp": int(datetime.now().timestamp() * 1000)}
message = {"category": "robotPing", "data": data}
await self._broadcast(message)
async def robot_log(self, text: str, level: str = "info"):
data = {
"text": text,
"level": level,
"timestamp": int(datetime.now().timestamp()) * 1000,
}
message = {"category": "robotLog", "data": data}
await self._broadcast(message)
async def robot_position_store(self, positions):
data = {
"updatedAt": datetime.now().isoformat(),
"positions": positions,
}
message = {"category": "robotPositionStore", "data": data}
await self._broadcast(message)
async def robot_order_store(self, orders):
data = {
"updatedAt": datetime.now().isoformat(),
"orders": orders,
}
message = {"category": "robotOrderStore", "data": data}
await self._broadcast(message)
async def robot_strategy_store(self, data):
d = {
"updatedAt": datetime.now().isoformat(),
"data": data,
}
message = {"category": "robotStrategyStore", "data": d}
await self._broadcast(message)
async def _connect(self, **kwargs):
# disable ping
kwargs["ping_interval"] = None
retry_count = 0
for i in range(3):
try:
self._ws = await websockets.connect(self._uri, **kwargs)
break
except Exception as exc:
logger.exception("Failed to connect to %s: %s.", self._uri, exc)
retry_count += 1
if retry_count >= 3:
raise
await asyncio.sleep(10)
logger.info("Connected to %s.", self._uri)
async def _reconnect(self):
await self._connect()
if self._authed:
await self.auth(self._api_key)
if len(self._sub_topics) > 0:
await self.sub(self._sub_topics)
logger.info("Reconnected to %s.", self._uri)
async def _deliver(self, s: str):
await self._inputs.put(s)
async def _send(self, s: str):
assert self._ws is not None, "No connection!"
try:
await self._ws.send(s)
logger.debug(">>> %s", s)
except websockets.ConnectionClosed as exc:
logger.exception(exc)
await self._reconnect()
async def _broadcast(self, message: Dict):
data = {"cmd": "broadcast", "message": message}
await self._deliver(json.dumps(data))
async def _pong(self, message: Dict[str, int]):
await self._send(json.dumps({"pong": message["ping"]}))
# todo: handle stop signal
async def _run(self):
await self._connect()
try:
while True:
incoming: asyncio.Task[Any] = asyncio.create_task(self._ws.recv())
outgoing: asyncio.Task[Any] = asyncio.create_task(self._inputs.get())
done: Set[asyncio.Future[Any]]
pending: Set[asyncio.Future[Any]]
done, pending = await asyncio.wait(
[incoming, outgoing], return_when=asyncio.FIRST_COMPLETED
)
# Cancel pending tasks to avoid leaking them.
if incoming in pending:
incoming.cancel()
if outgoing in pending:
outgoing.cancel()
if incoming in done:
try:
message = incoming.result()
logger.debug("<<< %s", message)
except websockets.ConnectionClosed as exc:
logger.exception(exc)
await self._reconnect()
else:
decoded = json.loads(message)
if "ping" in decoded:
await self._pong(decoded)
else:
try:
self._outputs.put_nowait(decoded)
except asyncio.QueueFull:
logger.warning("The outputs queue is full.")
if outgoing in done:
message = outgoing.result()
await self._send(message)
finally:
await self.close()
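    # Hedged note (added, not in the original): the incoming/outgoing pair
    # above is the usual websockets consumer/producer multiplexing pattern --
    # asyncio.wait(..., return_when=FIRST_COMPLETED) acts as a select() over
    # the socket and the internal send queue, and the still-pending task is
    # cancelled so nothing leaks between loop iterations.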
async def close(self):
ws = self._ws
self._ws = None
await ws.close()
close_status = exceptions.format_close(ws.close_code, ws.close_reason)
logger.info(f"Connection closed: {close_status}.")
ROBOT_REQ_PATH = "/robots/{robot_id}/"
ROBOT_PING_REQ_PATH = "/robots/{robot_id}/ping/"
ROBOT_ASSET_RECORD_REQ_PATH = "/robots/{robot_id}/assetRecord/"
ROBOT_STRATEGY_PARAMETERS_REQ_PATH = "/robots/{robot_id}/strategyParameters/"
ROBOT_CREDENTIAL_KEY_REQ_PATH = "/robots/{robot_id}/credentialKey/"
ROBOT_POSITION_STORE_REQ_PATH = "/robots/{robot_id}/positionStore/"
ROBOT_ORDER_STORE_REQ_PATH = "/robots/{robot_id}/orderStore/"
ROBOT_STRATEGY_STORE_REQ_PATH = "/robots/{robot_id}/strategyStore/"
class RESTAPIClient:
def __init__(self, base_url: str, api_key: str):
self._base_url: str = base_url.rstrip("/")
self._api_key: str = api_key
async def get_robot(self, robot_id: int):
req_path = ROBOT_REQ_PATH.format(robot_id=robot_id)
return await self._request("GET", req_path)
async def update_robot_asset_record(self, robot_id: int, data: Dict[str, Any]):
req_path = ROBOT_ASSET_RECORD_REQ_PATH.format(robot_id=robot_id)
return await self._request("PATCH", req_path, data=data)
async def update_robot_strategy_store(self, robot_id: int, data: Dict[str, Any]):
req_path = ROBOT_STRATEGY_STORE_REQ_PATH.format(robot_id=robot_id)
return await self._request("PUT", req_path, data=data)
async def update_robot_position_store(
self, robot_id: int, data: List[Dict[str, Any]]
):
req_path = ROBOT_POSITION_STORE_REQ_PATH.format(robot_id=robot_id)
return await self._request("PUT", req_path, data=data)
async def update_robot_order_store(self, robot_id: int, data: List[Dict[str, Any]]):
req_path = ROBOT_ORDER_STORE_REQ_PATH.format(robot_id=robot_id)
return await self._request("PUT", req_path, data=data)
async def ping_robot(self, robot_id: int):
req_path = ROBOT_PING_REQ_PATH.format(robot_id=robot_id)
return await self._request("POST", req_path)
async def get_robot_strategy_parameters(self, robot_id: int):
req_path = ROBOT_STRATEGY_PARAMETERS_REQ_PATH.format(robot_id=robot_id)
return await self._request("GET", req_path)
async def get_robot_credential_key(self, robot_id: int):
req_path = ROBOT_CREDENTIAL_KEY_REQ_PATH.format(robot_id=robot_id)
return await self._request("GET", req_path)
async def _request(
self,
method: str,
req_path: str,
headers: Optional[Dict[str, str]] = None,
params: Optional[Dict[str, str]] = None,
data: Optional[Union[Dict, List]] = None,
auth: bool = True,
):
req_headers = {"Content-Type": "application/json"}
if auth:
req_headers["X-Api-Key"] = self._api_key
if headers is not None:
req_headers.update(headers)
url = self._base_url + req_path
async with httpx.AsyncClient() as client:
logger.debug(
"%s %s, Request<headers=%s params=%s data=%s>",
method,
url,
req_headers,
params,
data,
)
res = await client.request(
method,
url,
headers=req_headers,
params=params,
json=data,
timeout=5,
)
http_text = res.text
logger.debug(
"%s %s, Response<status_code=%s headers=%s http_text=%s>",
method,
url,
res.status_code,
req_headers,
http_text,
)
res.raise_for_status()
if res.status_code == "204":
return None
return res.json()
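# Hedged usage sketch (added illustration, not part of the module); the URL,
# ROBOT_ID and API_KEY below are placeholders:
#
#   async def demo():
#       rest = RESTAPIClient("https://example.com/api/v1", API_KEY)
#       robot = await rest.get_robot(ROBOT_ID)
#       ws = WebsocketAPIClient("wss://example.com/ws")
#       await ws.auth(API_KEY)
#       await ws.sub(["some-topic"])          # topic names are app-defined
#       await ws.robot_log("hello", level="info")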
| 35.151724
| 88
| 0.572101
|
import asyncio
import json
import logging
from datetime import datetime
from typing import Any, Dict, Iterable, List, Optional, Set, Union
import httpx
import websockets
from websockets import exceptions
logger = logging.getLogger("yufuquantsdk")
class WebsocketAPIClient:
def __init__(self, uri: str, ws: websockets.WebSocketClientProtocol = None) -> None:
self._uri: str = uri
self._ws: websockets.WebSocketClientProtocol = ws
self._authed: bool = False
self._api_key = ""
self._sub_topics: Set[str] = set()
self._inputs: asyncio.Queue[str] = asyncio.Queue()
self._outputs: asyncio.Queue[str] = asyncio.Queue(maxsize=100)
self._run_task: asyncio.Task[Any] = asyncio.get_event_loop().create_task(
self._run()
)
async def auth(self, api_key: str):
message = {
"cmd": "auth",
"api_key": api_key,
}
await self._deliver(json.dumps(message))
self._authed = True
self._api_key = api_key
async def sub(self, topics: Iterable[str]):
if not isinstance(topics, set):
topics = set(topics)
message = {
"cmd": "sub",
"topics": list(topics), }
await self._deliver(json.dumps(message))
self._sub_topics = topics
async def unsub(self, topics: Iterable[str]):
if not isinstance(topics, set):
topics = set(topics)
message = {
"cmd": "unsub",
"topics": list(topics),
}
await self._deliver(json.dumps(message))
self._sub_topics = self._sub_topics - topics
async def robot_ping(self):
data = {"timestamp": int(datetime.now().timestamp() * 1000)}
message = {"category": "robotPing", "data": data}
await self._broadcast(message)
async def robot_log(self, text: str, level: str = "info"):
data = {
"text": text,
"level": level,
"timestamp": int(datetime.now().timestamp()) * 1000,
}
message = {"category": "robotLog", "data": data}
await self._broadcast(message)
async def robot_position_store(self, positions):
data = {
"updatedAt": datetime.now().isoformat(),
"positions": positions,
}
message = {"category": "robotPositionStore", "data": data}
await self._broadcast(message)
async def robot_order_store(self, orders):
data = {
"updatedAt": datetime.now().isoformat(),
"orders": orders,
}
message = {"category": "robotOrderStore", "data": data}
await self._broadcast(message)
async def robot_strategy_store(self, data):
d = {
"updatedAt": datetime.now().isoformat(),
"data": data,
}
message = {"category": "robotStrategyStore", "data": d}
await self._broadcast(message)
async def _connect(self, **kwargs):
kwargs["ping_interval"] = None
retry_count = 0
for i in range(3):
try:
self._ws = await websockets.connect(self._uri, **kwargs)
break
except Exception as exc:
logger.exception("Failed to connect to %s: %s.", self._uri, exc)
retry_count += 1
if retry_count >= 3:
raise
await asyncio.sleep(10)
logger.info("Connected to %s.", self._uri)
async def _reconnect(self):
await self._connect()
if self._authed:
await self.auth(self._api_key)
if len(self._sub_topics) > 0:
await self.sub(self._sub_topics)
logger.info("Reconnected to %s.", self._uri)
async def _deliver(self, s: str):
await self._inputs.put(s)
async def _send(self, s: str):
assert self._ws is not None, "No connection!"
try:
await self._ws.send(s)
logger.debug(">>> %s", s)
except websockets.ConnectionClosed as exc:
logger.exception(exc)
await self._reconnect()
async def _broadcast(self, message: Dict):
data = {"cmd": "broadcast", "message": message}
await self._deliver(json.dumps(data))
async def _pong(self, message: Dict[str, int]):
await self._send(json.dumps({"pong": message["ping"]}))
async def _run(self):
await self._connect()
try:
while True:
incoming: asyncio.Task[Any] = asyncio.create_task(self._ws.recv())
outgoing: asyncio.Task[Any] = asyncio.create_task(self._inputs.get())
done: Set[asyncio.Future[Any]]
pending: Set[asyncio.Future[Any]]
done, pending = await asyncio.wait(
[incoming, outgoing], return_when=asyncio.FIRST_COMPLETED
)
if incoming in pending:
incoming.cancel()
if outgoing in pending:
outgoing.cancel()
if incoming in done:
try:
message = incoming.result()
logger.debug("<<< %s", message)
except websockets.ConnectionClosed as exc:
logger.exception(exc)
await self._reconnect()
else:
decoded = json.loads(message)
if "ping" in decoded:
await self._pong(decoded)
else:
try:
self._outputs.put_nowait(decoded)
except asyncio.QueueFull:
logger.warning("The outputs queue is full.")
if outgoing in done:
message = outgoing.result()
await self._send(message)
finally:
await self.close()
async def close(self):
ws = self._ws
self._ws = None
await ws.close()
close_status = exceptions.format_close(ws.close_code, ws.close_reason)
logger.info(f"Connection closed: {close_status}.")
ROBOT_REQ_PATH = "/robots/{robot_id}/"
ROBOT_PING_REQ_PATH = "/robots/{robot_id}/ping/"
ROBOT_ASSET_RECORD_REQ_PATH = "/robots/{robot_id}/assetRecord/"
ROBOT_STRATEGY_PARAMETERS_REQ_PATH = "/robots/{robot_id}/strategyParameters/"
ROBOT_CREDENTIAL_KEY_REQ_PATH = "/robots/{robot_id}/credentialKey/"
ROBOT_POSITION_STORE_REQ_PATH = "/robots/{robot_id}/positionStore/"
ROBOT_ORDER_STORE_REQ_PATH = "/robots/{robot_id}/orderStore/"
ROBOT_STRATEGY_STORE_REQ_PATH = "/robots/{robot_id}/strategyStore/"
class RESTAPIClient:
def __init__(self, base_url: str, api_key: str):
self._base_url: str = base_url.rstrip("/")
self._api_key: str = api_key
async def get_robot(self, robot_id: int):
req_path = ROBOT_REQ_PATH.format(robot_id=robot_id)
return await self._request("GET", req_path)
async def update_robot_asset_record(self, robot_id: int, data: Dict[str, Any]):
req_path = ROBOT_ASSET_RECORD_REQ_PATH.format(robot_id=robot_id)
return await self._request("PATCH", req_path, data=data)
async def update_robot_strategy_store(self, robot_id: int, data: Dict[str, Any]):
req_path = ROBOT_STRATEGY_STORE_REQ_PATH.format(robot_id=robot_id)
return await self._request("PUT", req_path, data=data)
async def update_robot_position_store(
self, robot_id: int, data: List[Dict[str, Any]]
):
req_path = ROBOT_POSITION_STORE_REQ_PATH.format(robot_id=robot_id)
return await self._request("PUT", req_path, data=data)
async def update_robot_order_store(self, robot_id: int, data: List[Dict[str, Any]]):
req_path = ROBOT_ORDER_STORE_REQ_PATH.format(robot_id=robot_id)
return await self._request("PUT", req_path, data=data)
async def ping_robot(self, robot_id: int):
req_path = ROBOT_PING_REQ_PATH.format(robot_id=robot_id)
return await self._request("POST", req_path)
async def get_robot_strategy_parameters(self, robot_id: int):
req_path = ROBOT_STRATEGY_PARAMETERS_REQ_PATH.format(robot_id=robot_id)
return await self._request("GET", req_path)
async def get_robot_credential_key(self, robot_id: int):
req_path = ROBOT_CREDENTIAL_KEY_REQ_PATH.format(robot_id=robot_id)
return await self._request("GET", req_path)
async def _request(
self,
method: str,
req_path: str,
headers: Optional[Dict[str, str]] = None,
params: Optional[Dict[str, str]] = None,
data: Optional[Union[Dict, List]] = None,
auth: bool = True,
):
req_headers = {"Content-Type": "application/json"}
if auth:
req_headers["X-Api-Key"] = self._api_key
if headers is not None:
req_headers.update(headers)
url = self._base_url + req_path
async with httpx.AsyncClient() as client:
logger.debug(
"%s %s, Request<headers=%s params=%s data=%s>",
method,
url,
req_headers,
params,
data,
)
res = await client.request(
method,
url,
headers=req_headers,
params=params,
json=data,
timeout=5,
)
http_text = res.text
logger.debug(
"%s %s, Response<status_code=%s headers=%s http_text=%s>",
method,
url,
res.status_code,
req_headers,
http_text,
)
res.raise_for_status()
if res.status_code == "204":
return None
return res.json()
| true
| true
|
f7016aa5a1922d65f259e33893ab7489e6d9026d
| 436
|
py
|
Python
|
main.py
|
flaviopangracio/ort_music
|
604e4d918d3057ee7ea3716bd3dfb77072e81ba3
|
[
"MIT"
] | null | null | null |
main.py
|
flaviopangracio/ort_music
|
604e4d918d3057ee7ea3716bd3dfb77072e81ba3
|
[
"MIT"
] | null | null | null |
main.py
|
flaviopangracio/ort_music
|
604e4d918d3057ee7ea3716bd3dfb77072e81ba3
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import os
intents = discord.Intents.default()
intents.members = True
testing = False
client = commands.Bot(command_prefix = "-", case_insensitive = True, intents=intents)
client.remove_command('help')
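# Auto-load every extension found in ./cogs (discord.py imports each module and
# calls its setup() function).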
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
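# A minimal cog sketch for reference (hypothetical cogs/ping.py; illustrative
# only, not part of this repository):
#
#   from discord.ext import commands
#
#   class Ping(commands.Cog):
#       def __init__(self, bot):
#           self.bot = bot
#
#       @commands.command()
#       async def ping(self, ctx):
#           await ctx.send('pong')
#
#   def setup(bot):
#       bot.add_cog(Ping(bot))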
client.run('# Discord Bot Token here')
| 24.222222
| 86
| 0.697248
|
import discord
from discord.ext import commands
import os
intents = discord.Intents.default()
intents.members = True
testing = False
client = commands.Bot(command_prefix = "-", case_insensitive = True, intents=intents)
client.remove_command('help')
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
client.run('# Discord Bot Token here')
| true
| true
|
f7016acf98feae99af9da1775ed115b17df2200d
| 8,965
|
py
|
Python
|
jack/train_reader.py
|
elyase/jack
|
a4f43a4012a540d55d2e05d8a904e6f8cc3002f1
|
[
"MIT"
] | 192
|
2017-10-19T18:04:56.000Z
|
2019-09-21T23:29:03.000Z
|
jack/train_reader.py
|
elyase/jack
|
a4f43a4012a540d55d2e05d8a904e6f8cc3002f1
|
[
"MIT"
] | 120
|
2017-10-16T09:46:07.000Z
|
2019-06-20T18:34:24.000Z
|
jack/train_reader.py
|
elyase/jack
|
a4f43a4012a540d55d2e05d8a904e6f8cc3002f1
|
[
"MIT"
] | 50
|
2017-10-19T09:57:45.000Z
|
2019-07-24T13:46:26.000Z
|
# -*- coding: utf-8 -*-
import logging
import math
import os
import random
import shutil
import tensorflow as tf
from jack import readers
from jack.core.tensorflow import TFReader
from jack.eval import evaluate_reader, pretty_print_results
from jack.util.hooks import LossHook, ExamplesPerSecHook, ETAHook
logger = logging.getLogger(__name__)
def train(reader, train_data, test_data, dev_data, configuration: dict, debug=False):
if isinstance(reader, TFReader):
train_tensorflow(reader, train_data, test_data, dev_data, configuration, debug)
else:
train_pytorch(reader, train_data, test_data, dev_data, configuration, debug)
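
# A sample configuration sketch (assumption: the values are illustrative only;
# the keys are exactly those read via `configuration.get(...)` in the two
# trainers below):
#
#     configuration = {
#         'seed': 0,
#         'batch_size': 32,
#         'dev_batch_size': 64,
#         'epochs': 10,
#         'l2': 0.0,
#         'clip_value': 0.0,
#         'optimizer': 'adam',
#         'learning_rate': 1e-3,
#         'min_learning_rate': 1e-5,
#         'learning_rate_decay': 0.5,
#         'log_interval': 100,
#         'validation_interval': None,
#         'tensorboard_folder': None,
#         'reader': 'fastqa_reader',  # hypothetical reader name
#         'save_dir': '/tmp/reader',
#         'write_metrics_to': None,
#     }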
def train_tensorflow(reader, train_data, test_data, dev_data, configuration: dict, debug=False):
import tensorflow as tf
seed = configuration.get('seed', 0)
# make everything deterministic
random.seed(seed)
tf.set_random_seed(seed)
clip_value = configuration.get('clip_value')
batch_size = configuration.get('batch_size')
dev_batch_size = configuration.get('dev_batch_size') or batch_size
epochs = configuration.get('epochs')
l2 = configuration.get('l2')
optimizer = configuration.get('optimizer')
learning_rate = configuration.get('learning_rate')
min_learning_rate = configuration.get('min_learning_rate')
learning_rate_decay = configuration.get('learning_rate_decay')
log_interval = configuration.get('log_interval')
validation_interval = configuration.get('validation_interval')
tensorboard_folder = configuration.get('tensorboard_folder')
reader_type = configuration.get('reader')
save_dir = configuration.get('save_dir')
write_metrics_to = configuration.get('write_metrics_to')
if clip_value != 0.0:
        clip_value = (-abs(clip_value), abs(clip_value))
learning_rate = tf.get_variable("learning_rate", initializer=learning_rate, dtype=tf.float32, trainable=False)
lr_decay_op = learning_rate.assign(tf.maximum(learning_rate_decay * learning_rate, min_learning_rate))
name_to_optimizer = {
'gd': tf.train.GradientDescentOptimizer,
'adam': tf.train.AdamOptimizer,
'adagrad': tf.train.AdagradOptimizer,
'adadelta': tf.train.AdadeltaOptimizer,
'rmsprop': tf.train.RMSPropOptimizer
}
if optimizer not in name_to_optimizer:
raise ValueError('Unknown optimizer: {}'.format(optimizer))
tf_optimizer_class = name_to_optimizer[optimizer]
tf_optimizer = tf_optimizer_class(learning_rate=learning_rate)
sw = None
if tensorboard_folder is not None:
if os.path.exists(tensorboard_folder):
shutil.rmtree(tensorboard_folder)
sw = tf.summary.FileWriter(tensorboard_folder)
# Hooks
iter_interval = 1 if debug else log_interval
hooks = [LossHook(reader, iter_interval, summary_writer=sw),
ETAHook(reader, iter_interval, int(math.ceil(len(train_data) / batch_size)), epochs),
ExamplesPerSecHook(reader, batch_size, iter_interval, sw)]
preferred_metric, best_metric = readers.eval_hooks[reader_type].preferred_metric_and_initial_score()
def side_effect(metrics, prev_metric):
"""Returns: a state (in this case a metric) that is used as input for the next call"""
if prev_metric is None: # store whole reader only at beginning of training
reader.store(save_dir)
m = metrics[preferred_metric]
if prev_metric is not None and m < prev_metric:
reader.session.run(lr_decay_op)
logger.info("Decayed learning rate to: %.5f" % reader.session.run(learning_rate))
elif m > best_metric[0] and save_dir is not None:
best_metric[0] = m
reader.model_module.store(os.path.join(save_dir, "model_module"))
logger.info("Saving reader to: %s" % save_dir)
return m
# this is the standard hook for the reader
hooks.append(readers.eval_hooks[reader_type](
reader, dev_data, dev_batch_size, summary_writer=sw, side_effect=side_effect,
iter_interval=validation_interval,
epoch_interval=(1 if validation_interval is None else None),
write_metrics_to=write_metrics_to))
# Train
reader.train(tf_optimizer, train_data, batch_size, max_epochs=epochs, hooks=hooks,
l2=l2, clip=clip_value, clip_op=tf.clip_by_value, summary_writer=sw)
# Test final reader
if dev_data is not None and save_dir is not None:
reader.load(save_dir)
result_dict = evaluate_reader(reader, dev_data, batch_size)
logger.info("############### Results on the Dev Set##############")
pretty_print_results(result_dict)
if test_data is not None and save_dir is not None:
reader.load(save_dir)
result_dict = evaluate_reader(reader, test_data, batch_size)
logger.info("############### Results on the Test Set##############")
pretty_print_results(result_dict)
def train_pytorch(reader, train_data, test_data, dev_data, configuration: dict, debug=False):
import torch
    seed = configuration.get('seed', 0)
# make everything deterministic
random.seed(seed)
torch.manual_seed(seed)
clip_value = configuration.get('clip_value')
batch_size = configuration.get('batch_size')
epochs = configuration.get('epochs')
l2 = configuration.get('l2')
optimizer = configuration.get('optimizer')
learning_rate = configuration.get('learning_rate')
learning_rate_decay = configuration.get('learning_rate_decay')
log_interval = configuration.get('log_interval')
validation_interval = configuration.get('validation_interval')
tensorboard_folder = configuration.get('tensorboard_folder')
model = configuration.get('reader')
save_dir = configuration.get('save_dir')
write_metrics_to = configuration.get('write_metrics_to')
# need setup here already :(
reader.setup_from_data(train_data, is_training=True)
if clip_value != 0.0:
        clip_value = (-abs(clip_value), abs(clip_value))
name_to_optimizer = {
'gd': torch.optim.SGD,
'adam': torch.optim.Adam,
'adagrad': torch.optim.Adagrad,
'adadelta': torch.optim.Adadelta
}
if optimizer not in name_to_optimizer:
raise ValueError('Unknown optimizer: {}'.format(optimizer))
torch_optimizer_class = name_to_optimizer[optimizer]
params = list(reader.model_module.prediction_module.parameters())
params.extend(reader.model_module.loss_module.parameters())
torch_optimizer = torch_optimizer_class(params, lr=learning_rate)
sw = None
if tensorboard_folder is not None:
if os.path.exists(tensorboard_folder):
shutil.rmtree(tensorboard_folder)
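        # Note: TensorBoard logging reuses TensorFlow's FileWriter even in this
        # PyTorch code path (tensorflow is imported at module level).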
sw = tf.summary.FileWriter(tensorboard_folder)
# Hooks
iter_interval = 1 if debug else log_interval
hooks = [LossHook(reader, iter_interval, summary_writer=sw),
ExamplesPerSecHook(reader, batch_size, iter_interval, sw)]
preferred_metric, best_metric = readers.eval_hooks[model].preferred_metric_and_initial_score()
def side_effect(metrics, prev_metric):
"""Returns: a state (in this case a metric) that is used as input for the next call"""
m = metrics[preferred_metric]
if prev_metric is not None and m < prev_metric:
for param_group in torch_optimizer.param_groups:
param_group['lr'] *= learning_rate_decay
logger.info("Decayed learning rate to: %.5f" % param_group['lr'])
elif m > best_metric[0] and save_dir is not None:
best_metric[0] = m
if prev_metric is None: # store whole model only at beginning of training
reader.store(save_dir)
else:
reader.model_module.store(os.path.join(save_dir, "model_module"))
logger.info("Saving model to: %s" % save_dir)
return m
# this is the standard hook for the model
hooks.append(readers.eval_hooks[model](
reader, dev_data, batch_size, summary_writer=sw, side_effect=side_effect,
iter_interval=validation_interval,
epoch_interval=(1 if validation_interval is None else None),
write_metrics_to=write_metrics_to))
# Train
reader.train(torch_optimizer, train_data, batch_size, max_epochs=epochs, hooks=hooks,
l2=l2, clip=clip_value)
# Test final model
if dev_data is not None and save_dir is not None:
reader.load(save_dir)
result_dict = evaluate_reader(reader, dev_data, batch_size)
logger.info("############### Results on the Dev Set##############")
pretty_print_results(result_dict)
if test_data is not None and save_dir is not None:
reader.load(save_dir)
result_dict = evaluate_reader(reader, test_data, batch_size)
logger.info("############### Results on the Test Set##############")
pretty_print_results(result_dict)
| 40.201794
| 114
| 0.695482
|
import logging
import math
import os
import random
import shutil
import tensorflow as tf
from jack import readers
from jack.core.tensorflow import TFReader
from jack.eval import evaluate_reader, pretty_print_results
from jack.util.hooks import LossHook, ExamplesPerSecHook, ETAHook
logger = logging.getLogger(__name__)
def train(reader, train_data, test_data, dev_data, configuration: dict, debug=False):
if isinstance(reader, TFReader):
train_tensorflow(reader, train_data, test_data, dev_data, configuration, debug)
else:
train_pytorch(reader, train_data, test_data, dev_data, configuration, debug)
def train_tensorflow(reader, train_data, test_data, dev_data, configuration: dict, debug=False):
import tensorflow as tf
seed = configuration.get('seed', 0)
random.seed(seed)
tf.set_random_seed(seed)
clip_value = configuration.get('clip_value')
batch_size = configuration.get('batch_size')
dev_batch_size = configuration.get('dev_batch_size') or batch_size
epochs = configuration.get('epochs')
l2 = configuration.get('l2')
optimizer = configuration.get('optimizer')
learning_rate = configuration.get('learning_rate')
min_learning_rate = configuration.get('min_learning_rate')
learning_rate_decay = configuration.get('learning_rate_decay')
log_interval = configuration.get('log_interval')
validation_interval = configuration.get('validation_interval')
tensorboard_folder = configuration.get('tensorboard_folder')
reader_type = configuration.get('reader')
save_dir = configuration.get('save_dir')
write_metrics_to = configuration.get('write_metrics_to')
if clip_value != 0.0:
        clip_value = (-abs(clip_value), abs(clip_value))
learning_rate = tf.get_variable("learning_rate", initializer=learning_rate, dtype=tf.float32, trainable=False)
lr_decay_op = learning_rate.assign(tf.maximum(learning_rate_decay * learning_rate, min_learning_rate))
name_to_optimizer = {
'gd': tf.train.GradientDescentOptimizer,
'adam': tf.train.AdamOptimizer,
'adagrad': tf.train.AdagradOptimizer,
'adadelta': tf.train.AdadeltaOptimizer,
'rmsprop': tf.train.RMSPropOptimizer
}
if optimizer not in name_to_optimizer:
raise ValueError('Unknown optimizer: {}'.format(optimizer))
tf_optimizer_class = name_to_optimizer[optimizer]
tf_optimizer = tf_optimizer_class(learning_rate=learning_rate)
sw = None
if tensorboard_folder is not None:
if os.path.exists(tensorboard_folder):
shutil.rmtree(tensorboard_folder)
sw = tf.summary.FileWriter(tensorboard_folder)
iter_interval = 1 if debug else log_interval
hooks = [LossHook(reader, iter_interval, summary_writer=sw),
ETAHook(reader, iter_interval, int(math.ceil(len(train_data) / batch_size)), epochs),
ExamplesPerSecHook(reader, batch_size, iter_interval, sw)]
preferred_metric, best_metric = readers.eval_hooks[reader_type].preferred_metric_and_initial_score()
def side_effect(metrics, prev_metric):
if prev_metric is None: reader.store(save_dir)
m = metrics[preferred_metric]
if prev_metric is not None and m < prev_metric:
reader.session.run(lr_decay_op)
logger.info("Decayed learning rate to: %.5f" % reader.session.run(learning_rate))
elif m > best_metric[0] and save_dir is not None:
best_metric[0] = m
reader.model_module.store(os.path.join(save_dir, "model_module"))
logger.info("Saving reader to: %s" % save_dir)
return m
hooks.append(readers.eval_hooks[reader_type](
reader, dev_data, dev_batch_size, summary_writer=sw, side_effect=side_effect,
iter_interval=validation_interval,
epoch_interval=(1 if validation_interval is None else None),
write_metrics_to=write_metrics_to))
reader.train(tf_optimizer, train_data, batch_size, max_epochs=epochs, hooks=hooks,
l2=l2, clip=clip_value, clip_op=tf.clip_by_value, summary_writer=sw)
if dev_data is not None and save_dir is not None:
reader.load(save_dir)
result_dict = evaluate_reader(reader, dev_data, batch_size)
logger.info("############### Results on the Dev Set##############")
pretty_print_results(result_dict)
if test_data is not None and save_dir is not None:
reader.load(save_dir)
result_dict = evaluate_reader(reader, test_data, batch_size)
logger.info("############### Results on the Test Set##############")
pretty_print_results(result_dict)
def train_pytorch(reader, train_data, test_data, dev_data, configuration: dict, debug=False):
import torch
    seed = configuration.get('seed', 0)
random.seed(seed)
torch.manual_seed(seed)
clip_value = configuration.get('clip_value')
batch_size = configuration.get('batch_size')
epochs = configuration.get('epochs')
l2 = configuration.get('l2')
optimizer = configuration.get('optimizer')
learning_rate = configuration.get('learning_rate')
learning_rate_decay = configuration.get('learning_rate_decay')
log_interval = configuration.get('log_interval')
validation_interval = configuration.get('validation_interval')
tensorboard_folder = configuration.get('tensorboard_folder')
model = configuration.get('reader')
save_dir = configuration.get('save_dir')
write_metrics_to = configuration.get('write_metrics_to')
reader.setup_from_data(train_data, is_training=True)
if clip_value != 0.0:
        clip_value = (-abs(clip_value), abs(clip_value))
name_to_optimizer = {
'gd': torch.optim.SGD,
'adam': torch.optim.Adam,
'adagrad': torch.optim.Adagrad,
'adadelta': torch.optim.Adadelta
}
if optimizer not in name_to_optimizer:
raise ValueError('Unknown optimizer: {}'.format(optimizer))
torch_optimizer_class = name_to_optimizer[optimizer]
params = list(reader.model_module.prediction_module.parameters())
params.extend(reader.model_module.loss_module.parameters())
torch_optimizer = torch_optimizer_class(params, lr=learning_rate)
sw = None
if tensorboard_folder is not None:
if os.path.exists(tensorboard_folder):
shutil.rmtree(tensorboard_folder)
sw = tf.summary.FileWriter(tensorboard_folder)
iter_interval = 1 if debug else log_interval
hooks = [LossHook(reader, iter_interval, summary_writer=sw),
ExamplesPerSecHook(reader, batch_size, iter_interval, sw)]
preferred_metric, best_metric = readers.eval_hooks[model].preferred_metric_and_initial_score()
def side_effect(metrics, prev_metric):
m = metrics[preferred_metric]
if prev_metric is not None and m < prev_metric:
for param_group in torch_optimizer.param_groups:
param_group['lr'] *= learning_rate_decay
logger.info("Decayed learning rate to: %.5f" % param_group['lr'])
elif m > best_metric[0] and save_dir is not None:
best_metric[0] = m
if prev_metric is None: reader.store(save_dir)
else:
reader.model_module.store(os.path.join(save_dir, "model_module"))
logger.info("Saving model to: %s" % save_dir)
return m
hooks.append(readers.eval_hooks[model](
reader, dev_data, batch_size, summary_writer=sw, side_effect=side_effect,
iter_interval=validation_interval,
epoch_interval=(1 if validation_interval is None else None),
write_metrics_to=write_metrics_to))
reader.train(torch_optimizer, train_data, batch_size, max_epochs=epochs, hooks=hooks,
l2=l2, clip=clip_value)
if dev_data is not None and save_dir is not None:
reader.load(save_dir)
result_dict = evaluate_reader(reader, dev_data, batch_size)
logger.info("############### Results on the Dev Set##############")
pretty_print_results(result_dict)
if test_data is not None and save_dir is not None:
reader.load(save_dir)
result_dict = evaluate_reader(reader, test_data, batch_size)
logger.info("############### Results on the Test Set##############")
pretty_print_results(result_dict)
| true
| true
|
f7016b5152ebd061fcd29b371db0ceded5c080ba
| 23,671
|
py
|
Python
|
utils/batchnorm_layer.py
|
limberc/hypercl
|
ad098a3b18cf2a2ae6e3ecd28a2b7af698f7b807
|
[
"Apache-2.0"
] | null | null | null |
utils/batchnorm_layer.py
|
limberc/hypercl
|
ad098a3b18cf2a2ae6e3ecd28a2b7af698f7b807
|
[
"Apache-2.0"
] | null | null | null |
utils/batchnorm_layer.py
|
limberc/hypercl
|
ad098a3b18cf2a2ae6e3ecd28a2b7af698f7b807
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019 Christian Henning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
- **title** :utils/batchnorm_layer.py
- **author** :ch
- **contact** :[email protected]
- **created** :09/02/2019
- **version** :1.0
- **python_version** :3.6.8
Implementation of a hypernetwork-compatible batchnorm layer.
The joint use of batch-normalization and hypernetworks is not straightforward,
mainly because the statistics accumulated by the batch-norm operation assume
that the weights of the main network change only slowly. If a hypernetwork
replaces the whole set of weights, the statistics previously estimated by the
batch-norm layer might be completely off.
To circumvent this problem, we provide multiple solutions:
- In a continual learning setting with one set of weights per task, we can
simply estimate and store statistics per task (hence, the batch-norm
operation has to be conditioned on the task).
- The statistics are distilled into the hypernetwork. This would require
the addition of an extra loss term.
- The statistics can be treated as parameters that are outputted by the
hypernetwork. In this case, nothing enforces that these "statistics"
behave similar to statistics that would result from a running estimate
(hence, the resulting operation might have nothing in common with batch-
norm).
- Always use the statistics estimated on the current batch.
Note, we also provide the option of turning off the statistics, in which case
the statistics will be set to zero mean and unit variance. This is helpful when
interpreting batch-normalization as a general form of gain modulation (i.e.,
just applying a shift and scale to neural activities).
"""
from warnings import warn
import torch
import torch.nn as nn
import torch.nn.functional as F
class BatchNormLayer(nn.Module):
r"""Hypernetwork-compatible batch-normalization layer.
Note, batch normalization performs the following operation
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \
\gamma + \beta
This class allows to deviate from this standard implementation in order to
provide the flexibility required when using hypernetworks. Therefore, we
slightly change the notation to
.. math::
y = \frac{x - m_{\text{stats}}^{(t)}}{\sqrt{v_{\text{stats}}^{(t)} + \
\epsilon}} * \gamma^{(t)} + \beta^{(t)}
We use this notation to highlight that the running statistics
:math:`m_{\text{stats}}^{(t)}` and :math:`v_{\text{stats}}^{(t)}` are not
necessarily estimates resulting from mean and variance computation but might
be learned parameters (e.g., the outputs of a hypernetwork).
We additionally use the superscript :math:`(t)` to denote that the gain
:math:`\gamma`, offset :math:`\beta` and statistics may be dynamically
selected based on some external context information.
This class provides the possibility to checkpoint statistics
:math:`m_{\text{stats}}^{(t)}` and :math:`v_{\text{stats}}^{(t)}`, but
**not** gains and offsets.
.. note::
If context-dependent gains :math:`\gamma^{(t)}` and offsets
:math:`\beta^{(t)}` are required, then they have to be maintained
externally, e.g., via a task-conditioned hypernetwork (see
`this paper`_ for an example) and passed to the :meth:`forward` method.
.. _this paper: https://arxiv.org/abs/1906.00695
Attributes:
weights: A list of all internal weights of this layer. If all
weights are assumed to be generated externally, then this
attribute will be ``None``.
param_shapes: A list of list of integers. Each list represents the
shape of a parameter tensor. Note, this attribute is
independent of the attribute :attr:`weights`, it always comprises
the shapes of all weight tensors as if the network would be stand-
alone (i.e., no weights being passed to the :meth:`forward` method).
Note, unless ``learnable_stats`` is enabled, the layer statistics
are not considered here.
hyper_shapes: A list of list of integers. Each list represents the
shape of a weight tensor that can be passed to the :meth:`forward`
method. If all weights are maintained internally, then this
attribute will be ``None``.
Specifically, this attribute is controlled by the argument
``affine``. If ``affine`` is ``True``, this attribute will be
``None``. Otherwise this attribute contains the shape of
:math:`\gamma` and :math:`\beta`.
num_stats: The number :math:`T` of internally managed statistics
:math:`\{(m_{\text{stats}}^{(1)}, v_{\text{stats}}^{(1)}), \dots, \
(m_{\text{stats}}^{(T)}, v_{\text{stats}}^{(T)}) \}`. This number is
incremented everytime the method :meth:`checkpoint_stats` is called.
"""
def __init__(self, num_features, momentum=0.1, affine=True,
track_running_stats=True, frozen_stats=False,
learnable_stats=False):
r"""
Args:
num_features: See argument ``num_features``, for instance, of class
:class:`torch.nn.BatchNorm1d`.
momentum: See argument ``momentum`` of class
:class:`torch.nn.BatchNorm1d`.
affine: See argument ``affine`` of class
:class:`torch.nn.BatchNorm1d`. If set to :code:`False`, the
input activity will simply be "whitened" according to the
applied layer statistics (except if gain :math:`\gamma` and
offset :math:`\beta` are passed to the :meth:`forward` method).
Note, if ``learnable_stats`` is :code:`False`, then setting
``affine`` to :code:`False` results in no learnable weights for
this layer (running stats might still be updated, but not via
gradient descent).
Note, even if this option is ``False``, one may still pass a
gain :math:`\gamma` and offset :math:`\beta` to the
:meth:`forward` method.
track_running_stats: See argument ``track_running_stats`` of class
:class:`torch.nn.BatchNorm1d`.
            frozen_stats: If ``True``, the layer statistics are frozen at their
                initial values of :math:`m_{\text{stats}} = 0` and
                :math:`v_{\text{stats}} = 1`, i.e., layer activity will not be
                whitened.
Note, this option requires ``track_running_stats`` to be set to
``False``.
learnable_stats: If ``True``, the layer statistics are initialized
as learnable parameters (:code:`requires_grad=True`).
Note, these extra parameters will be maintained internally and
not added to the :attr:`weights`. Statistics can always be
maintained externally and passed to the :meth:`forward` method.
Note, this option requires ``track_running_stats`` to be set to
``False``.
"""
super(BatchNormLayer, self).__init__()
if learnable_stats:
# FIXME We need our custom stats computation for this.
# The running stats updated by `torch.nn.functional.batch_norm` do
# not allow backpropagation.
# See here on how they are computed:
# https://github.com/pytorch/pytorch/blob/96fe2b4ecbbd02143d95f467655a2d697282ac32/aten/src/ATen/native/Normalization.cpp#L137
raise NotImplementedError('Option "learnable_stats" has not been ' +
'implemented yet!')
if momentum is None:
# If one wants to implement this, then please note that the
# attribute `num_batches_tracked` has to be added. Also, note the
# extra code for computing the momentum value in the forward method
# of class `_BatchNorm`:
# https://pytorch.org/docs/stable/_modules/torch/nn/modules/batchnorm.html#_BatchNorm
            raise NotImplementedError("This reimplementation of PyTorch's " +
                                      'batchnorm layer does not support ' +
                                      'setting "momentum" to None.')
if learnable_stats and track_running_stats:
raise ValueError('Option "track_running_stats" must be set to ' +
'False when enabling "learnable_stats".')
if frozen_stats and track_running_stats:
raise ValueError('Option "track_running_stats" must be set to ' +
'False when enabling "frozen_stats".')
self._num_features = num_features
self._momentum = momentum
self._affine = affine
self._track_running_stats = track_running_stats
self._frozen_stats = frozen_stats
self._learnable_stats = learnable_stats
self.register_buffer('_num_stats', torch.tensor(0, dtype=torch.long))
self._weights = nn.ParameterList()
self._param_shapes = [[num_features], [num_features]]
if affine:
# Gamma
self.register_parameter('scale', nn.Parameter( \
torch.Tensor(num_features), requires_grad=True))
# Beta
self.register_parameter('bias', nn.Parameter( \
torch.Tensor(num_features), requires_grad=True))
self._weights.append(self.scale)
self._weights.append(self.bias)
nn.init.ones_(self.scale)
nn.init.zeros_(self.bias)
elif not learnable_stats:
self._weights = None
if learnable_stats:
# Don't forget to add the new params to `self._weights`.
# Don't forget to add shapes to `self._param_shapes`.
raise NotImplementedError()
elif track_running_stats or frozen_stats:
# Note, in case of frozen stats, we just don't update the stats
# initialized here later on.
self.checkpoint_stats()
else:
mname, vname = self._stats_names(0)
self.register_buffer(mname, None)
self.register_buffer(vname, None)
@property
def weights(self):
"""Getter for read-only attribute :attr:`weights`.
Returns:
A :class:`torch.nn.ParameterList` or ``None``, if no parameters are
internally maintained.
"""
return self._weights
@property
def param_shapes(self):
"""Getter for read-only attribute :attr:`param_shapes`.
Returns:
A list of lists of integers.
"""
return self._param_shapes
@property
def hyper_shapes(self):
"""Getter for read-only attribute :attr:`hyper_shapes`.
Returns:
A list of lists of integers.
"""
# FIXME not implemented attribute. Do we even need the attribute, given
# that all components are individually passed to the forward method?
raise NotImplementedError('Not implemented yet!')
return self._hyper_shapes
@property
def num_stats(self):
"""Getter for read-only attribute :attr:`num_stats`.
Returns:
(int)
"""
return self._num_stats
def forward(self, inputs, running_mean=None, running_var=None, weight=None,
bias=None, stats_id=None):
r"""Apply batch normalization to given layer activations.
Based on the state if this module (attribute :attr:`training`), the
configuration of this layer and the parameters currently passed, the
behavior of this function will be different.
The core of this method still relies on the function
:func:`torch.nn.functional.batch_norm`. In the following we list the
different behaviors of this method based on the context.
**In training mode:**
We first consider the case that this module is in training mode, i.e.,
:meth:`torch.nn.Module.train` has been called.
Usually, during training, the running statistics are not used when
computing the output, instead the statistics computed on the current
batch are used (denoted by *use batch stats* in the table below).
However, the batch statistics are typically updated during training
(denoted by *update running stats* in the table below).
The above described scenario would correspond to passing batch
statistics to the function :func:`torch.nn.functional.batch_norm` and
setting the parameter ``training`` to ``True``.
+----------------------+---------------------+-------------------------+
| **training mode** | **use batch stats** | **update running stats**|
+----------------------+---------------------+-------------------------+
| given stats | Yes | Yes |
+----------------------+---------------------+-------------------------+
| track running stats | Yes | Yes |
+----------------------+---------------------+-------------------------+
| frozen stats | No | No |
+----------------------+---------------------+-------------------------+
| learnable stats | Yes | Yes [1]_ |
+----------------------+---------------------+-------------------------+
|no track running stats| Yes | No |
+----------------------+---------------------+-------------------------+
The meaning of each row in this table is as follows:
- **given stats**: External stats are provided via the parameters
``running_mean`` and ``running_var``.
- **track running stats**: If ``track_running_stats`` was set to
``True`` in the constructor and no stats were given.
- **frozen stats**: If ``frozen_stats`` was set to ``True`` in the
constructor and no stats were given.
- **learnable stats**: If ``learnable_stats`` was set to ``True`` in
the constructor and no stats were given.
- **no track running stats**: If none of the above options apply,
then the statistics will always be computed from the current batch
(also in eval mode).
.. note::
If provided, running stats specified via ``running_mean`` and
``running_var`` always have priority.
.. [1] We use a custom implementation to update the running statistics,
that is compatible with backpropagation.
**In evaluation mode:**
We now consider the case that this module is in evaluation mode, i.e.,
:meth:`torch.nn.Module.eval` has been called.
Here is the same table as above just for the evaluation mode.
+----------------------+---------------------+-------------------------+
| **evaluation mode** | **use batch stats** | **update running stats**|
+----------------------+---------------------+-------------------------+
| track running stats | No | No |
+----------------------+---------------------+-------------------------+
| frozen stats | No | No |
+----------------------+---------------------+-------------------------+
| learnable stats | No | No |
+----------------------+---------------------+-------------------------+
| given stats | No | No |
+----------------------+---------------------+-------------------------+
|no track running stats| Yes | No |
+----------------------+---------------------+-------------------------+
Args:
inputs: The inputs to the batchnorm layer.
running_mean (optional): Running mean stats
:math:`m_{\text{stats}}`. This option has priority, i.e., any
internally maintained statistics are ignored if given.
.. note::
If specified, then ``running_var`` also has to be specified.
running_var (optional): Similar to option ``running_mean``, but for
the running variance stats :math:`v_{\text{stats}}`
.. note::
If specified, then ``running_mean`` also has to be
specified.
weight (optional): The gain factors :math:`\gamma`. If given, any
internal gains are ignored. If option ``affine`` was set to
``False`` in the constructor and this option remains ``None``,
then no gains are multiplied to the "whitened" inputs.
bias (optional): The behavior of this option is similar to option
``weight``, except that this option represents the offsets
:math:`\beta`.
stats_id: This argument is optional except if multiple running
stats checkpoints exist (i.e., attribute :attr:`num_stats` is
greater than 1) and no running stats have been provided to this
method.
.. note::
This argument is ignored if running stats have been passed.
Returns:
The layer activation ``inputs`` after batch-norm has been applied.
"""
assert (running_mean is None and running_var is None or \
running_mean is not None and running_var is not None)
if not self._affine:
if weight is None or bias is None:
raise ValueError('Layer was generated in non-affine mode. ' +
'Therefore, arguments "weight" and "bias" ' +
'may not be None.')
# No gains given but we have internal gains.
# Otherwise, if no gains are given we leave `weight` as None.
if weight is None and self._affine:
weight = self.scale
if bias is None and self._affine:
bias = self.bias
stats_given = running_mean is not None
if (running_mean is None or running_var is None):
if stats_id is None and self.num_stats > 1:
raise ValueError('Parameter "stats_id" is not defined but ' +
'multiple running stats are available.')
            # Note, frozen stats are also kept in internal buffers and need to
            # be fetched here (cmp. the `_frozen_stats` branch below).
            elif self._track_running_stats or self._frozen_stats:
if stats_id is None:
stats_id = 0
assert (stats_id < self.num_stats)
rm, rv = self.get_stats(stats_id)
if running_mean is None:
running_mean = rm
if running_var is None:
running_var = rv
elif stats_id is not None:
warn('Parameter "stats_id" is ignored since running stats have ' +
'been provided.')
momentum = self._momentum
if stats_given or self._track_running_stats:
return F.batch_norm(inputs, running_mean, running_var,
weight=weight, bias=bias,
training=self.training, momentum=momentum)
if self._learnable_stats:
raise NotImplementedError()
if self._frozen_stats:
return F.batch_norm(inputs, running_mean, running_var,
weight=weight, bias=bias, training=False)
# TODO implement scale and shift here. Note, that `running_mean` and
# `running_var` are always 0 and 1, resp. Therefore, the call to
# `F.batch_norm` is a waste of computation.
# ret = inputs
# if weight is not None:
# # Multiply `ret` with `weight` such that dimensions are
# # respected.
# pass
# if bias is not None:
# # Add `bias` to modified `ret` such that dimensions are
# # respected.
# pass
# return ret
else:
assert (not self._track_running_stats)
# Always compute statistics based on current batch.
return F.batch_norm(inputs, None, None, weight=weight, bias=bias,
training=True, momentum=momentum)
def checkpoint_stats(self, device=None):
"""Buffers for a new set of running stats will be registered.
Calling this function will also increment the attribute
:attr:`num_stats`.
Args:
device (optional): If not provided, the newly created statistics
will either be moved to the device of the most recent statistics
or to CPU if no prior statistics exist.
"""
assert (self._track_running_stats or \
self._frozen_stats and self._num_stats == 0)
if device is None:
if self.num_stats > 0:
mname_old, _ = self._stats_names(self._num_stats - 1)
device = getattr(self, mname_old).device
if self._learnable_stats:
raise NotImplementedError()
mname, vname = self._stats_names(self._num_stats)
self._num_stats += 1
self.register_buffer(mname, torch.zeros(self._num_features,
device=device))
self.register_buffer(vname, torch.ones(self._num_features,
device=device))
def get_stats(self, stats_id=None):
"""Get a set of running statistics (means and variances).
Args:
stats_id (optional): ID of stats. If not provided, the most recent
stats are returned.
Returns:
(tuple): Tuple containing:
- **running_mean**
- **running_var**
"""
if stats_id is None:
stats_id = self.num_stats - 1
assert (stats_id < self.num_stats)
mname, vname = self._stats_names(stats_id)
running_mean = getattr(self, mname)
running_var = getattr(self, vname)
return running_mean, running_var
def _stats_names(self, stats_id):
"""Get the buffer names for mean and variance statistics depending on
the ``stats_id``, i.e., the ID of the stats checkpoint.
Args:
stats_id: ID of stats.
Returns:
(tuple): Tuple containing:
- **mean_name**
- **var_name**
"""
mean_name = 'mean_%d' % stats_id
var_name = 'var_%d' % stats_id
return mean_name, var_name
if __name__ == '__main__':
pass
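    # A minimal usage sketch appended for illustration (assumption: random CPU
    # tensors stand in for data from two sequential tasks; purely
    # illustrative, not part of the original module).
    bn = BatchNormLayer(num_features=3)
    x = torch.rand(8, 3)
    bn.train()
    y_train = bn(x)  # batch stats are used and the running stats are updated
    # Checkpoint the statistics estimated so far (e.g., for "task 0") ...
    bn.checkpoint_stats()
    bn.eval()
    # ... and later whiten with exactly those stats again.
    y_eval = bn(x, stats_id=0)
    print(y_train.shape, y_eval.shape, int(bn.num_stats))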
| 44.080074
| 138
| 0.567023
|
from warnings import warn
import torch
import torch.nn as nn
import torch.nn.functional as F
class BatchNormLayer(nn.Module):
def __init__(self, num_features, momentum=0.1, affine=True,
track_running_stats=True, frozen_stats=False,
learnable_stats=False):
super(BatchNormLayer, self).__init__()
if learnable_stats:
raise NotImplementedError('Option "learnable_stats" has not been ' +
'implemented yet!')
if momentum is None:
            raise NotImplementedError("This reimplementation of PyTorch's " +
                                      'batchnorm layer does not support ' +
                                      'setting "momentum" to None.')
if learnable_stats and track_running_stats:
raise ValueError('Option "track_running_stats" must be set to ' +
'False when enabling "learnable_stats".')
if frozen_stats and track_running_stats:
raise ValueError('Option "track_running_stats" must be set to ' +
'False when enabling "frozen_stats".')
self._num_features = num_features
self._momentum = momentum
self._affine = affine
self._track_running_stats = track_running_stats
self._frozen_stats = frozen_stats
self._learnable_stats = learnable_stats
self.register_buffer('_num_stats', torch.tensor(0, dtype=torch.long))
self._weights = nn.ParameterList()
self._param_shapes = [[num_features], [num_features]]
if affine:
self.register_parameter('scale', nn.Parameter( \
torch.Tensor(num_features), requires_grad=True))
self.register_parameter('bias', nn.Parameter( \
torch.Tensor(num_features), requires_grad=True))
self._weights.append(self.scale)
self._weights.append(self.bias)
nn.init.ones_(self.scale)
nn.init.zeros_(self.bias)
elif not learnable_stats:
self._weights = None
if learnable_stats:
# Don't forget to add shapes to `self._param_shapes`.
raise NotImplementedError()
elif track_running_stats or frozen_stats:
# initialized here later on.
self.checkpoint_stats()
else:
mname, vname = self._stats_names(0)
self.register_buffer(mname, None)
self.register_buffer(vname, None)
@property
def weights(self):
return self._weights
@property
def param_shapes(self):
return self._param_shapes
@property
def hyper_shapes(self):
# FIXME not implemented attribute. Do we even need the attribute, given
# that all components are individually passed to the forward method?
raise NotImplementedError('Not implemented yet!')
return self._hyper_shapes
@property
def num_stats(self):
return self._num_stats
def forward(self, inputs, running_mean=None, running_var=None, weight=None,
bias=None, stats_id=None):
assert (running_mean is None and running_var is None or \
running_mean is not None and running_var is not None)
if not self._affine:
if weight is None or bias is None:
raise ValueError('Layer was generated in non-affine mode. ' +
'Therefore, arguments "weight" and "bias" ' +
'may not be None.')
# No gains given but we have internal gains.
# Otherwise, if no gains are given we leave `weight` as None.
if weight is None and self._affine:
weight = self.scale
if bias is None and self._affine:
bias = self.bias
stats_given = running_mean is not None
if (running_mean is None or running_var is None):
if stats_id is None and self.num_stats > 1:
raise ValueError('Parameter "stats_id" is not defined but ' +
'multiple running stats are available.')
            elif self._track_running_stats or self._frozen_stats:
if stats_id is None:
stats_id = 0
assert (stats_id < self.num_stats)
rm, rv = self.get_stats(stats_id)
if running_mean is None:
running_mean = rm
if running_var is None:
running_var = rv
elif stats_id is not None:
warn('Parameter "stats_id" is ignored since running stats have ' +
'been provided.')
momentum = self._momentum
if stats_given or self._track_running_stats:
return F.batch_norm(inputs, running_mean, running_var,
weight=weight, bias=bias,
training=self.training, momentum=momentum)
if self._learnable_stats:
raise NotImplementedError()
if self._frozen_stats:
return F.batch_norm(inputs, running_mean, running_var,
weight=weight, bias=bias, training=False)
# TODO implement scale and shift here. Note, that `running_mean` and
# `running_var` are always 0 and 1, resp. Therefore, the call to
# `F.batch_norm` is a waste of computation.
# ret = inputs
# if weight is not None:
# # Multiply `ret` with `weight` such that dimensions are
# # respected.
# pass
# if bias is not None:
# # Add `bias` to modified `ret` such that dimensions are
# # respected.
# pass
# return ret
else:
assert (not self._track_running_stats)
# Always compute statistics based on current batch.
return F.batch_norm(inputs, None, None, weight=weight, bias=bias,
training=True, momentum=momentum)
def checkpoint_stats(self, device=None):
assert (self._track_running_stats or \
self._frozen_stats and self._num_stats == 0)
if device is None:
if self.num_stats > 0:
mname_old, _ = self._stats_names(self._num_stats - 1)
device = getattr(self, mname_old).device
if self._learnable_stats:
raise NotImplementedError()
mname, vname = self._stats_names(self._num_stats)
self._num_stats += 1
self.register_buffer(mname, torch.zeros(self._num_features,
device=device))
self.register_buffer(vname, torch.ones(self._num_features,
device=device))
def get_stats(self, stats_id=None):
if stats_id is None:
stats_id = self.num_stats - 1
assert (stats_id < self.num_stats)
mname, vname = self._stats_names(stats_id)
running_mean = getattr(self, mname)
running_var = getattr(self, vname)
return running_mean, running_var
def _stats_names(self, stats_id):
mean_name = 'mean_%d' % stats_id
var_name = 'var_%d' % stats_id
return mean_name, var_name
if __name__ == '__main__':
pass
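    bn = BatchNormLayer(num_features=3)
    x = torch.rand(8, 3)
    bn.train()
    y_train = bn(x)
    bn.checkpoint_stats()
    bn.eval()
    y_eval = bn(x, stats_id=0)
    print(y_train.shape, y_eval.shape, int(bn.num_stats))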
| true
| true
|
f7016c5c21cf950aa7d4520f766ef05b3aa9a22c
| 14,028
|
py
|
Python
|
tensorflow/contrib/learn/python/learn/estimators/linear.py
|
calebchoo/modulabs
|
10fbaf0581700641fc9b38b1bd722044bfb7c638
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/learn/python/learn/estimators/linear.py
|
calebchoo/modulabs
|
10fbaf0581700641fc9b38b1bd722044bfb7c638
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/learn/python/learn/estimators/linear.py
|
calebchoo/modulabs
|
10fbaf0581700641fc9b38b1bd722044bfb7c638
|
[
"Apache-2.0"
] | 1
|
2020-03-26T00:09:00.000Z
|
2020-03-26T00:09:00.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import sdca_optimizer
from tensorflow.contrib.learn.python.learn.estimators.base import DeprecatedMixin
from tensorflow.python.framework import ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.platform import tf_logging as logging
# TODO(b/29580537): Replace with @changing decorator.
def _changing(feature_columns):
if feature_columns is not None:
return
logging.warn(
"Change warning: `feature_columns` will be required after 2016-08-01.\n"
"Instructions for updating:\n"
"Pass `tf.contrib.learn.infer_real_valued_columns_from_input(x)` or"
" `tf.contrib.learn.infer_real_valued_columns_from_input_fn(input_fn)`"
" as `feature_columns`, where `x` or `input_fn` is your argument to"
" `fit`, `evaluate`, or `predict`.")
class LinearClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):
"""Linear classifier model.
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
Example:
```python
education = sparse_column_with_hash_bucket(column_name="education",
hash_bucket_size=1000)
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
education_x_occupation = crossed_column(columns=[education, occupation],
hash_bucket_size=10000)
# Estimator using the default optimizer.
estimator = LinearClassifier(
feature_columns=[occupation, education_x_occupation])
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearClassifier(
feature_columns=[occupation, education_x_occupation],
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Or estimator using the SDCAOptimizer.
estimator = LinearClassifier(
feature_columns=[occupation, education_x_occupation],
optimizer=tf.contrib.learn.SDCAOptimizer(
example_id_column='example_id',
symmetric_l2_regularization=2.0
))
# Input builders
  def input_fn_train(): # returns x, y
    ...
  def input_fn_eval(): # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
- if `feature_columns` is `None`, then `input` must contains only real
valued `Tensor`.
"""
def __init__(self,
feature_columns=None,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=True,
config=None):
"""Construct a `LinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc.
n_classes: number of target classes. Default is binary classification.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: The optimizer used to train the model. If specified, it should
be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
the Ftrl optimizer will be used.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `LinearClassifier` estimator.
"""
_changing(feature_columns)
super(LinearClassifier, self).__init__(
model_dir=model_dir,
n_classes=n_classes,
weight_column_name=weight_column_name,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
config=config)
self._feature_columns_inferred = False
# TODO(b/29580537): Remove feature_columns inference.
def _validate_linear_feature_columns(self, features):
if self._linear_feature_columns is None:
self._linear_feature_columns = layers.infer_real_valued_columns(features)
self._feature_columns_inferred = True
elif self._feature_columns_inferred:
this_dict = {c.name: c for c in self._linear_feature_columns}
that_dict = {
c.name: c for c in layers.infer_real_valued_columns(features)
}
if this_dict != that_dict:
raise ValueError(
"Feature columns, expected %s, got %s.", (this_dict, that_dict))
def _get_train_ops(self, features, targets):
"""See base class."""
self._validate_linear_feature_columns(features)
if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
return super(LinearClassifier, self)._get_train_ops(features, targets)
# SDCA currently supports binary classification only.
if self._target_column.num_label_columns > 2:
raise ValueError(
"SDCA does not currently support multi-class classification.")
global_step = contrib_variables.get_global_step()
assert global_step
logits, columns_to_variables, _ = layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=self._linear_feature_columns,
num_outputs=self._target_column.num_label_columns,
weight_collections=[self._linear_weight_collection],
scope="linear")
with ops.control_dependencies([self._centered_bias()]):
loss = self._target_column.loss(logits, targets, features)
logging_ops.scalar_summary("loss", loss)
train_ops = self._linear_optimizer.get_train_step(
self._linear_feature_columns, self._target_column.weight_column_name,
"logistic_loss", features, targets, columns_to_variables, global_step)
return train_ops, loss
def _get_eval_ops(self, features, targets, metrics=None):
self._validate_linear_feature_columns(features)
return super(LinearClassifier, self)._get_eval_ops(
features, targets, metrics)
def _get_predict_ops(self, features):
"""See base class."""
self._validate_linear_feature_columns(features)
return super(LinearClassifier, self)._get_predict_ops(features)
@property
def weights_(self):
return self.linear_weights_
@property
def bias_(self):
return self.linear_bias_
class LinearRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
"""Linear regressor model.
Train a linear regression model to predict target variable value given
observation of feature values.
Example:
```python
education = sparse_column_with_hash_bucket(column_name="education",
hash_bucket_size=1000)
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
education_x_occupation = crossed_column(columns=[education, occupation],
hash_bucket_size=10000)
estimator = LinearRegressor(
feature_columns=[occupation, education_x_occupation])
# Input builders
  def input_fn_train(): # returns x, y
    ...
  def input_fn_eval(): # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
- if `feature_columns` is `None`:
input must contains only real valued `Tensor`.
"""
def __init__(self,
feature_columns=None,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=True,
target_dimension=1,
config=None):
"""Construct a `LinearRegressor` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
target_dimension: dimension of the target for multilabels.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `LinearRegressor` estimator.
"""
_changing(feature_columns)
super(LinearRegressor, self).__init__(
model_dir=model_dir,
weight_column_name=weight_column_name,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
target_dimension=target_dimension,
config=config)
self._feature_columns_inferred = False
# TODO(b/29580537): Remove feature_columns inference.
def _validate_linear_feature_columns(self, features):
if self._linear_feature_columns is None:
self._linear_feature_columns = layers.infer_real_valued_columns(features)
self._feature_columns_inferred = True
elif self._feature_columns_inferred:
this_dict = {c.name: c for c in self._linear_feature_columns}
that_dict = {
c.name: c for c in layers.infer_real_valued_columns(features)
}
if this_dict != that_dict:
raise ValueError(
"Feature columns, expected %s, got %s.", (this_dict, that_dict))
def _get_train_ops(self, features, targets):
"""See base class."""
if isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("SDCAOptimizer does not currently support regression.")
self._validate_linear_feature_columns(features)
return super(LinearRegressor, self)._get_train_ops(features, targets)
def _get_eval_ops(self, features, targets, metrics=None):
self._validate_linear_feature_columns(features)
return super(LinearRegressor, self)._get_eval_ops(
features, targets, metrics)
def _get_predict_ops(self, features):
"""See base class."""
self._validate_linear_feature_columns(features)
return super(LinearRegressor, self)._get_predict_ops(features)
@property
def weights_(self):
return self.linear_weights_
@property
def bias_(self):
return self.linear_bias_
# TensorFlowLinearRegressor and TensorFlowLinearClassifier are deprecated.
class TensorFlowLinearRegressor(DeprecatedMixin, LinearRegressor,
_sklearn.RegressorMixin):
pass
class TensorFlowLinearClassifier(DeprecatedMixin, LinearClassifier,
_sklearn.ClassifierMixin):
pass
TensorFlowRegressor = TensorFlowLinearRegressor
TensorFlowClassifier = TensorFlowLinearClassifier
| 39.294118
| 82
| 0.71008
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import sdca_optimizer
from tensorflow.contrib.learn.python.learn.estimators.base import DeprecatedMixin
from tensorflow.python.framework import ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.platform import tf_logging as logging
def _changing(feature_columns):
if feature_columns is not None:
return
logging.warn(
"Change warning: `feature_columns` will be required after 2016-08-01.\n"
"Instructions for updating:\n"
"Pass `tf.contrib.learn.infer_real_valued_columns_from_input(x)` or"
" `tf.contrib.learn.infer_real_valued_columns_from_input_fn(input_fn)`"
" as `feature_columns`, where `x` or `input_fn` is your argument to"
" `fit`, `evaluate`, or `predict`.")
class LinearClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):
def __init__(self,
feature_columns=None,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=True,
config=None):
_changing(feature_columns)
super(LinearClassifier, self).__init__(
model_dir=model_dir,
n_classes=n_classes,
weight_column_name=weight_column_name,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
config=config)
self._feature_columns_inferred = False
def _validate_linear_feature_columns(self, features):
if self._linear_feature_columns is None:
self._linear_feature_columns = layers.infer_real_valued_columns(features)
self._feature_columns_inferred = True
elif self._feature_columns_inferred:
this_dict = {c.name: c for c in self._linear_feature_columns}
that_dict = {
c.name: c for c in layers.infer_real_valued_columns(features)
}
if this_dict != that_dict:
raise ValueError(
"Feature columns, expected %s, got %s.", (this_dict, that_dict))
def _get_train_ops(self, features, targets):
self._validate_linear_feature_columns(features)
if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
return super(LinearClassifier, self)._get_train_ops(features, targets)
if self._target_column.num_label_columns > 2:
raise ValueError(
"SDCA does not currently support multi-class classification.")
global_step = contrib_variables.get_global_step()
assert global_step
logits, columns_to_variables, _ = layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=self._linear_feature_columns,
num_outputs=self._target_column.num_label_columns,
weight_collections=[self._linear_weight_collection],
scope="linear")
with ops.control_dependencies([self._centered_bias()]):
loss = self._target_column.loss(logits, targets, features)
logging_ops.scalar_summary("loss", loss)
train_ops = self._linear_optimizer.get_train_step(
self._linear_feature_columns, self._target_column.weight_column_name,
"logistic_loss", features, targets, columns_to_variables, global_step)
return train_ops, loss
def _get_eval_ops(self, features, targets, metrics=None):
self._validate_linear_feature_columns(features)
return super(LinearClassifier, self)._get_eval_ops(
features, targets, metrics)
def _get_predict_ops(self, features):
self._validate_linear_feature_columns(features)
return super(LinearClassifier, self)._get_predict_ops(features)
@property
def weights_(self):
return self.linear_weights_
@property
def bias_(self):
return self.linear_bias_
class LinearRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
def __init__(self,
feature_columns=None,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=True,
target_dimension=1,
config=None):
_changing(feature_columns)
super(LinearRegressor, self).__init__(
model_dir=model_dir,
weight_column_name=weight_column_name,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
target_dimension=target_dimension,
config=config)
self._feature_columns_inferred = False
def _validate_linear_feature_columns(self, features):
if self._linear_feature_columns is None:
self._linear_feature_columns = layers.infer_real_valued_columns(features)
self._feature_columns_inferred = True
elif self._feature_columns_inferred:
this_dict = {c.name: c for c in self._linear_feature_columns}
that_dict = {
c.name: c for c in layers.infer_real_valued_columns(features)
}
if this_dict != that_dict:
raise ValueError(
"Feature columns, expected %s, got %s.", (this_dict, that_dict))
def _get_train_ops(self, features, targets):
if isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("SDCAOptimizer does not currently support regression.")
self._validate_linear_feature_columns(features)
return super(LinearRegressor, self)._get_train_ops(features, targets)
def _get_eval_ops(self, features, targets, metrics=None):
self._validate_linear_feature_columns(features)
return super(LinearRegressor, self)._get_eval_ops(
features, targets, metrics)
def _get_predict_ops(self, features):
self._validate_linear_feature_columns(features)
return super(LinearRegressor, self)._get_predict_ops(features)
@property
def weights_(self):
return self.linear_weights_
@property
def bias_(self):
return self.linear_bias_
class TensorFlowLinearRegressor(DeprecatedMixin, LinearRegressor,
_sklearn.RegressorMixin):
pass
class TensorFlowLinearClassifier(DeprecatedMixin, LinearClassifier,
_sklearn.ClassifierMixin):
pass
TensorFlowRegressor = TensorFlowLinearRegressor
TensorFlowClassifier = TensorFlowLinearClassifier
| true
| true
|
f7016c7c0c3a67ee7989e59320e22bece5f09791
| 9,486
|
py
|
Python
|
py/trtorch/_compile_spec.py
|
peri044/TRTorch
|
62c9830b24552651abbff611515114cbcaca8b7b
|
[
"BSD-3-Clause"
] | 1
|
2021-06-18T17:26:58.000Z
|
2021-06-18T17:26:58.000Z
|
py/trtorch/_compile_spec.py
|
peri044/TRTorch
|
62c9830b24552651abbff611515114cbcaca8b7b
|
[
"BSD-3-Clause"
] | null | null | null |
py/trtorch/_compile_spec.py
|
peri044/TRTorch
|
62c9830b24552651abbff611515114cbcaca8b7b
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import List, Dict, Any
import torch
import trtorch._C
from trtorch import _types
def _supported_input_size_type(input_size: Any) -> bool:
if isinstance(input_size, torch.Size):
return True
elif isinstance(input_size, tuple):
return True
elif isinstance(input_size, list):
return True
else:
raise TypeError(
"Input sizes for inputs are required to be a List, tuple or torch.Size or a Dict of three sizes (min, opt, max), found type: "
+ str(type(input_size)))
def _parse_input_ranges(input_sizes: List) -> List:
if any(not isinstance(i, dict) and not _supported_input_size_type(i) for i in input_sizes):
raise KeyError("An input size must either be a static size or a range of three sizes (min, opt, max) as Dict")
parsed_input_sizes = []
for i in input_sizes:
if isinstance(i, dict):
if all(k in i for k in ["min", "opt", "min"]):
in_range = trtorch._C.InputRange()
in_range.min = i["min"]
in_range.opt = i["opt"]
in_range.max = i["max"]
parsed_input_sizes.append(in_range)
elif "opt" in i:
in_range = trtorch._C.InputRange()
in_range.min = i["opt"]
in_range.opt = i["opt"]
in_range.max = i["opt"]
parsed_input_sizes.append(in_range)
else:
raise KeyError(
"An input size must either be a static size or a range of three sizes (min, opt, max) as Dict")
elif isinstance(i, list):
in_range = trtorch._C.InputRange()
in_range.min = i
in_range.opt = i
in_range.max = i
parsed_input_sizes.append(in_range)
elif isinstance(i, tuple):
in_range = trtorch._C.InputRange()
in_range.min = list(i)
in_range.opt = list(i)
in_range.max = list(i)
parsed_input_sizes.append(in_range)
return parsed_input_sizes
def _parse_op_precision(precision: Any) -> _types.dtype:
if isinstance(precision, torch.dtype):
if precision == torch.int8:
return _types.dtype.int8
elif precision == torch.half:
return _types.dtype.half
elif precision == torch.float:
return _types.dtype.float
else:
raise TypeError("Provided an unsupported dtype as operating precision (support: int8, half, float), got: " +
str(precision))
elif isinstance(precision, _types.DataTypes):
return precision
else:
raise TypeError("Op precision type needs to be specified with a torch.dtype or a trtorch.dtype, got: " +
str(type(precision)))
def _parse_device_type(device: Any) -> _types.DeviceType:
if isinstance(device, torch.device):
if device.type == 'cuda':
return _types.DeviceType.gpu
else:
ValueError("Got a device type other than GPU or DLA (type: " + str(device.type) + ")")
elif isinstance(device, _types.DeviceType):
return device
elif isinstance(device, str):
if device == "gpu" or device == "GPU":
return _types.DeviceType.gpu
elif device == "dla" or device == "DLA":
return _types.DeviceType.dla
else:
ValueError("Got a device type other than GPU or DLA (type: " + str(device) + ")")
else:
raise TypeError("Device specification must be of type torch.device, string or trtorch.DeviceType, but got: " +
str(type(device)))
def _parse_compile_spec(compile_spec: Dict[str, Any]) -> trtorch._C.CompileSpec:
info = trtorch._C.CompileSpec()
if "input_shapes" not in compile_spec:
raise KeyError(
"Input shapes for inputs are required as a List, provided as either a static sizes or a range of three sizes (min, opt, max) as Dict"
)
info.input_ranges = _parse_input_ranges(compile_spec["input_shapes"])
if "op_precision" in compile_spec:
info.op_precision = _parse_op_precision(compile_spec["op_precision"])
if "refit" in compile_spec:
assert isinstance(compile_spec["refit"], bool)
info.refit = compile_spec["refit"]
if "debug" in compile_spec:
assert isinstance(compile_spec["debug"], bool)
info.debug = compile_spec["debug"]
if "strict_types" in compile_spec:
assert isinstance(compile_spec["strict_types"], bool)
info.strict_types = compile_spec["strict_types"]
if "allow_gpu_fallback" in compile_spec:
assert isinstance(compile_spec["allow_gpu_fallback"], bool)
info.allow_gpu_fallback = compile_spec["allow_gpu_fallback"]
if "device_type" in compile_spec:
info.device = _parse_device_type(compile_spec["device_type"])
if "capability" in compile_spec:
assert isinstance(compile_spec["capability"], _types.EngineCapability)
info.capability = compile_spec["capability"]
if "num_min_timing_iters" in compile_spec:
assert type(compile_spec["num_min_timing_iters"]) is int
info.num_min_timing_iters = compile_spec["num_min_timing_iters"]
if "num_avg_timing_iters" in compile_spec:
assert type(compile_spec["num_avg_timing_iters"]) is int
info.num_avg_timing_iters = compile_spec["num_avg_timing_iters"]
if "workspace_size" in compile_spec:
assert type(compile_spec["workspace_size"]) is int
info.workspace_size = compile_spec["workspace_size"]
if "max_batch_size" in compile_spec:
assert type(compile_spec["max_batch_size"]) is int
info.max_batch_size = compile_spec["max_batch_size"]
return info
def TensorRTCompileSpec(compile_spec: Dict[str, Any]):
"""
Utility to create a formatted spec dictionary for using the PyTorch TensorRT backend
Args:
compile_spec (dict): Compilation settings including operating precision, target device, etc.
One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs
to the graph. All other keys are optional. One spec entry is given for each method to be compiled.
.. code-block:: py
CompileSpec = {
"forward" : trtorch.TensorRTCompileSpec({
"input_shapes": [
(1, 3, 224, 224), # Static input shape for input #1
{
"min": (1, 3, 224, 224),
"opt": (1, 3, 512, 512),
"max": (1, 3, 1024, 1024)
} # Dynamic input shape for input #2
],
"op_precision": torch.half, # Operating precision set to FP16
"refit": False, # enable refit
"debug": False, # enable debuggable engine
"strict_types": False, # kernels should strictly run in operating precision
"allow_gpu_fallback": True, # (DLA only) Allow layers unsupported on DLA to run on GPU
"device": torch.device("cuda"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)
"capability": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels
"num_min_timing_iters": 2, # Number of minimization timing iterations used to select kernels
"num_avg_timing_iters": 1, # Number of averaging timing iterations used to select kernels
"workspace_size": 0, # Maximum size of workspace given to TensorRT
"max_batch_size": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)
})
}
Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using
torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum
to select device type.
Returns:
torch.classes.tensorrt.CompileSpec: List of methods and formatted spec objects to be provided to ``torch._C._jit_to_tensorrt``
"""
parsed_spec = _parse_compile_spec(compile_spec)
backend_spec = torch.classes.tensorrt.CompileSpec()
for i in parsed_spec.input_ranges:
ir = torch.classes.tensorrt.InputRange()
ir.set_min(i.min)
ir.set_opt(i.opt)
ir.set_max(i.max)
backend_spec.append_input_range(ir)
backend_spec.set_op_precision(int(parsed_spec.op_precision))
backend_spec.set_refit(parsed_spec.refit)
backend_spec.set_debug(parsed_spec.debug)
backend_spec.set_strict_types(parsed_spec.strict_types)
backend_spec.set_allow_gpu_fallback(parsed_spec.allow_gpu_fallback)
backend_spec.set_device(int(parsed_spec.device))
backend_spec.set_capability(int(parsed_spec.capability))
backend_spec.set_num_min_timing_iters(parsed_spec.num_min_timing_iters)
backend_spec.set_num_avg_timing_iters(parsed_spec.num_avg_timing_iters)
backend_spec.set_workspace_size(parsed_spec.workspace_size)
backend_spec.set_max_batch_size(parsed_spec.max_batch_size)
return backend_spec
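# Hedged usage sketch (not part of the original module): assumes trtorch is
# importable so the torch.classes.tensorrt bindings are registered. The method
# name "forward" and the input shape are illustrative.
if __name__ == "__main__":
    spec = {
        "forward":
            TensorRTCompileSpec({
                "input_shapes": [(1, 3, 224, 224)],  # one static-shape input
                "op_precision": torch.half,          # run kernels in FP16
            })
    }
    print(spec["forward"])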
| 42.16
| 145
| 0.630824
|
from typing import List, Dict, Any
import torch
import trtorch._C
from trtorch import _types
def _supported_input_size_type(input_size: Any) -> bool:
if isinstance(input_size, torch.Size):
return True
elif isinstance(input_size, tuple):
return True
elif isinstance(input_size, list):
return True
else:
raise TypeError(
"Input sizes for inputs are required to be a List, tuple or torch.Size or a Dict of three sizes (min, opt, max), found type: "
+ str(type(input_size)))
def _parse_input_ranges(input_sizes: List) -> List:
if any(not isinstance(i, dict) and not _supported_input_size_type(i) for i in input_sizes):
raise KeyError("An input size must either be a static size or a range of three sizes (min, opt, max) as Dict")
parsed_input_sizes = []
for i in input_sizes:
if isinstance(i, dict):
if all(k in i for k in ["min", "opt", "min"]):
in_range = trtorch._C.InputRange()
in_range.min = i["min"]
in_range.opt = i["opt"]
in_range.max = i["max"]
parsed_input_sizes.append(in_range)
elif "opt" in i:
in_range = trtorch._C.InputRange()
in_range.min = i["opt"]
in_range.opt = i["opt"]
in_range.max = i["opt"]
parsed_input_sizes.append(in_range)
else:
raise KeyError(
"An input size must either be a static size or a range of three sizes (min, opt, max) as Dict")
elif isinstance(i, list):
in_range = trtorch._C.InputRange()
in_range.min = i
in_range.opt = i
in_range.max = i
parsed_input_sizes.append(in_range)
elif isinstance(i, tuple):
in_range = trtorch._C.InputRange()
in_range.min = list(i)
in_range.opt = list(i)
in_range.max = list(i)
parsed_input_sizes.append(in_range)
return parsed_input_sizes
def _parse_op_precision(precision: Any) -> _types.dtype:
if isinstance(precision, torch.dtype):
if precision == torch.int8:
return _types.dtype.int8
elif precision == torch.half:
return _types.dtype.half
elif precision == torch.float:
return _types.dtype.float
else:
raise TypeError("Provided an unsupported dtype as operating precision (support: int8, half, float), got: " +
str(precision))
elif isinstance(precision, _types.DataTypes):
return precision
else:
raise TypeError("Op precision type needs to be specified with a torch.dtype or a trtorch.dtype, got: " +
str(type(precision)))
def _parse_device_type(device: Any) -> _types.DeviceType:
if isinstance(device, torch.device):
if device.type == 'cuda':
return _types.DeviceType.gpu
else:
ValueError("Got a device type other than GPU or DLA (type: " + str(device.type) + ")")
elif isinstance(device, _types.DeviceType):
return device
elif isinstance(device, str):
if device == "gpu" or device == "GPU":
return _types.DeviceType.gpu
elif device == "dla" or device == "DLA":
return _types.DeviceType.dla
else:
ValueError("Got a device type other than GPU or DLA (type: " + str(device) + ")")
else:
raise TypeError("Device specification must be of type torch.device, string or trtorch.DeviceType, but got: " +
str(type(device)))
def _parse_compile_spec(compile_spec: Dict[str, Any]) -> trtorch._C.CompileSpec:
info = trtorch._C.CompileSpec()
if "input_shapes" not in compile_spec:
raise KeyError(
"Input shapes for inputs are required as a List, provided as either a static sizes or a range of three sizes (min, opt, max) as Dict"
)
info.input_ranges = _parse_input_ranges(compile_spec["input_shapes"])
if "op_precision" in compile_spec:
info.op_precision = _parse_op_precision(compile_spec["op_precision"])
if "refit" in compile_spec:
assert isinstance(compile_spec["refit"], bool)
info.refit = compile_spec["refit"]
if "debug" in compile_spec:
assert isinstance(compile_spec["debug"], bool)
info.debug = compile_spec["debug"]
if "strict_types" in compile_spec:
assert isinstance(compile_spec["strict_types"], bool)
info.strict_types = compile_spec["strict_types"]
if "allow_gpu_fallback" in compile_spec:
assert isinstance(compile_spec["allow_gpu_fallback"], bool)
info.allow_gpu_fallback = compile_spec["allow_gpu_fallback"]
if "device_type" in compile_spec:
info.device = _parse_device_type(compile_spec["device_type"])
if "capability" in compile_spec:
assert isinstance(compile_spec["capability"], _types.EngineCapability)
info.capability = compile_spec["capability"]
if "num_min_timing_iters" in compile_spec:
assert type(compile_spec["num_min_timing_iters"]) is int
info.num_min_timing_iters = compile_spec["num_min_timing_iters"]
if "num_avg_timing_iters" in compile_spec:
assert type(compile_spec["num_avg_timing_iters"]) is int
info.num_avg_timing_iters = compile_spec["num_avg_timing_iters"]
if "workspace_size" in compile_spec:
assert type(compile_spec["workspace_size"]) is int
info.workspace_size = compile_spec["workspace_size"]
if "max_batch_size" in compile_spec:
assert type(compile_spec["max_batch_size"]) is int
info.max_batch_size = compile_spec["max_batch_size"]
return info
def TensorRTCompileSpec(compile_spec: Dict[str, Any]):
parsed_spec = _parse_compile_spec(compile_spec)
backend_spec = torch.classes.tensorrt.CompileSpec()
for i in parsed_spec.input_ranges:
ir = torch.classes.tensorrt.InputRange()
ir.set_min(i.min)
ir.set_opt(i.opt)
ir.set_max(i.max)
backend_spec.append_input_range(ir)
backend_spec.set_op_precision(int(parsed_spec.op_precision))
backend_spec.set_refit(parsed_spec.refit)
backend_spec.set_debug(parsed_spec.debug)
backend_spec.set_strict_types(parsed_spec.strict_types)
backend_spec.set_allow_gpu_fallback(parsed_spec.allow_gpu_fallback)
backend_spec.set_device(int(parsed_spec.device))
backend_spec.set_capability(int(parsed_spec.capability))
backend_spec.set_num_min_timing_iters(parsed_spec.num_min_timing_iters)
backend_spec.set_num_avg_timing_iters(parsed_spec.num_avg_timing_iters)
backend_spec.set_workspace_size(parsed_spec.workspace_size)
backend_spec.set_max_batch_size(parsed_spec.max_batch_size)
return backend_spec
| true
| true
|
f7016cf80f2cf98c95ee5e74713188d48fa48077
| 295
|
py
|
Python
|
appointment_booking_drchrono/appointments/urls.py
|
TimothyBest/Appointment_Booking_drchrono
|
279faf13724e768a6712d2f9911fe32399e4544c
|
[
"MIT"
] | null | null | null |
appointment_booking_drchrono/appointments/urls.py
|
TimothyBest/Appointment_Booking_drchrono
|
279faf13724e768a6712d2f9911fe32399e4544c
|
[
"MIT"
] | null | null | null |
appointment_booking_drchrono/appointments/urls.py
|
TimothyBest/Appointment_Booking_drchrono
|
279faf13724e768a6712d2f9911fe32399e4544c
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, url
urlpatterns = patterns('appointments.views',
url(r'^appointment/(?P<practice_id>\d+)/$', 'appointment_form', name='appointment_form'),
url(r'^appointment/created/(?P<practice_id>\d+)/$', 'appointment_created', name='appointment_created'),
)
| 32.777778
| 107
| 0.718644
|
from django.conf.urls import patterns, url
urlpatterns = patterns('appointments.views',
url(r'^appointment/(?P<practice_id>\d+)/$', 'appointment_form', name='appointment_form'),
url(r'^appointment/created/(?P<practice_id>\d+)/$', 'appointment_created', name='appointment_created'),
)
| true
| true
|
f7016d30d5f6e38ed75c2e93258d83dff26db920
| 2,848
|
py
|
Python
|
menu.py
|
applefreak/europi
|
6ad531c72e148fd1c5f75ee82e588b886ef330a4
|
[
"MIT"
] | 1
|
2021-01-11T01:36:07.000Z
|
2021-01-11T01:36:07.000Z
|
menu.py
|
applefreak/europi
|
6ad531c72e148fd1c5f75ee82e588b886ef330a4
|
[
"MIT"
] | null | null | null |
menu.py
|
applefreak/europi
|
6ad531c72e148fd1c5f75ee82e588b886ef330a4
|
[
"MIT"
] | null | null | null |
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from rotary_class import RotaryEncoder
class Display():
def __init__(self, disp):
self.disp = disp
self.dimensions = (disp.width, disp.height)
self.image = Image.new('1', self.dimensions)
self.draw = ImageDraw.Draw(self.image)
self.font = ImageFont.truetype("./DejaVuSansMono.ttf", 10)
def display_clear(self):
self.draw.rectangle((0, 0) + self.dimensions, outline = 0, fill = 0)
def init_display(self):
self.disp.begin()
self.disp.clear()
self.disp.display()
self.display_clear()
self.disp.image(self.image)
self.disp.display()
def draw_rows(self, rows, inv_col):
self.display_clear()
for idx, row in enumerate(rows):
if inv_col == idx:
self.draw.rectangle([(0, 10 * idx), (10 * idx + self.dimensions[0], 1 + 10 * idx + 10)], outline = 0, fill = 255)
self.draw.text((1, 10 * idx), row, font = self.font, fill = 0)
else:
self.draw.rectangle([(0, 10 * idx), (10 * idx + self.dimensions[0], 1 + 10 * idx + 10)], outline = 0, fill = 0)
self.draw.text((1, 10 * idx), row, font = self.font, fill = 255)
self.disp.image(self.image)
self.disp.display()
class Menu():
def __init__(self, disp, encoder, items = []):
self.items = items
self.pointer = 0
self.row = 0
self.last_row = 0
self.last_slice = None
self.disp = Display(disp)
self.disp.init_display()
self.draw()
def encoder_ev (direction):
if direction == 1:
self.prev()
elif direction == 2:
self.next()
elif direction == 3:
self.exec_item()
self.encoder = RotaryEncoder(encoder["pin1"], encoder["pin2"], encoder["sw"], encoder_ev)
def draw(self):
tmp_slice = None
if self.row == self.last_row:
if self.last_row == 0:
tmp_slice = self.items[self.pointer:self.pointer + 3]
else:
tmp_slice = self.items[self.pointer - 2:self.pointer + 1]
self.disp.draw_rows(tmp_slice, self.row)
self.last_slice = tmp_slice
else:
self.disp.draw_rows(self.last_slice, self.row)
self.last_row = self.row
def next(self):
if self.pointer + 1 <= len(self.items) - 1:
self.pointer += 1
if self.row < 2:
self.row += 1
self.draw()
def prev(self):
if self.pointer - 1 >= 0:
self.pointer -= 1
if self.row > 0:
self.row -= 1
self.draw()
def exec_item(self):
print("Item selcted", str(self.pointer))
| 32
| 129
| 0.542837
|
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from rotary_class import RotaryEncoder
class Display():
def __init__(self, disp):
self.disp = disp
self.dimensions = (disp.width, disp.height)
self.image = Image.new('1', self.dimensions)
self.draw = ImageDraw.Draw(self.image)
self.font = ImageFont.truetype("./DejaVuSansMono.ttf", 10)
def display_clear(self):
self.draw.rectangle((0, 0) + self.dimensions, outline = 0, fill = 0)
def init_display(self):
self.disp.begin()
self.disp.clear()
self.disp.display()
self.display_clear()
self.disp.image(self.image)
self.disp.display()
def draw_rows(self, rows, inv_col):
self.display_clear()
for idx, row in enumerate(rows):
if inv_col == idx:
self.draw.rectangle([(0, 10 * idx), (10 * idx + self.dimensions[0], 1 + 10 * idx + 10)], outline = 0, fill = 255)
self.draw.text((1, 10 * idx), row, font = self.font, fill = 0)
else:
self.draw.rectangle([(0, 10 * idx), (10 * idx + self.dimensions[0], 1 + 10 * idx + 10)], outline = 0, fill = 0)
self.draw.text((1, 10 * idx), row, font = self.font, fill = 255)
self.disp.image(self.image)
self.disp.display()
class Menu():
def __init__(self, disp, encoder, items = []):
self.items = items
self.pointer = 0
self.row = 0
self.last_row = 0
self.last_slice = None
self.disp = Display(disp)
self.disp.init_display()
self.draw()
def encoder_ev (direction):
if direction == 1:
self.prev()
elif direction == 2:
self.next()
elif direction == 3:
self.exec_item()
self.encoder = RotaryEncoder(encoder["pin1"], encoder["pin2"], encoder["sw"], encoder_ev)
def draw(self):
tmp_slice = None
if self.row == self.last_row:
if self.last_row == 0:
tmp_slice = self.items[self.pointer:self.pointer + 3]
else:
tmp_slice = self.items[self.pointer - 2:self.pointer + 1]
self.disp.draw_rows(tmp_slice, self.row)
self.last_slice = tmp_slice
else:
self.disp.draw_rows(self.last_slice, self.row)
self.last_row = self.row
def next(self):
if self.pointer + 1 <= len(self.items) - 1:
self.pointer += 1
if self.row < 2:
self.row += 1
self.draw()
def prev(self):
if self.pointer - 1 >= 0:
self.pointer -= 1
if self.row > 0:
self.row -= 1
self.draw()
def exec_item(self):
print("Item selcted", str(self.pointer))
| true
| true
|
f7016dd197dc84aeab737877e938c7ecfc8970b5
| 5,276
|
py
|
Python
|
tests/analyzer/test_as_import.py
|
CAM-Gerlach/unimport
|
acaebf547274a95a33816e47ec22bb73d8456b17
|
[
"MIT"
] | 147
|
2019-09-19T15:43:06.000Z
|
2022-03-25T16:42:08.000Z
|
tests/analyzer/test_as_import.py
|
CAM-Gerlach/unimport
|
acaebf547274a95a33816e47ec22bb73d8456b17
|
[
"MIT"
] | 154
|
2019-10-31T19:50:18.000Z
|
2022-03-29T12:43:00.000Z
|
tests/analyzer/test_as_import.py
|
CAM-Gerlach/unimport
|
acaebf547274a95a33816e47ec22bb73d8456b17
|
[
"MIT"
] | 28
|
2019-10-31T18:11:13.000Z
|
2021-09-06T08:24:14.000Z
|
from tests.analyzer.utils import UnusedTestCase
from unimport.statement import Import, ImportFrom
class AsImportTestCase(UnusedTestCase):
def test_as_import_all_unused_all_cases(self):
self.assertSourceAfterScanningEqualToExpected(
"""\
from x import y as z
import x
from t import s as ss
from f import a as c, l as k, i as ii
from fo import (bar, i, x as z)
import le as x
""",
[
ImportFrom(
lineno=1,
column=1,
name="z",
package="x",
star=False,
suggestions=[],
),
Import(
lineno=2,
column=1,
name="x",
package="x",
),
ImportFrom(
lineno=3,
column=1,
name="ss",
package="t",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=1,
name="c",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=2,
name="k",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=3,
name="ii",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=1,
name="bar",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=2,
name="i",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=3,
name="z",
package="fo",
star=False,
suggestions=[],
),
Import(
lineno=6,
column=1,
name="x",
package="le",
),
],
)
def test_as_import_one_used_in_function_all_cases(self):
self.assertSourceAfterScanningEqualToExpected(
"""\
from x import y as z
import x
from t import s as ss
from f import a as c, l as k, i as ii
from fo import (bar, i, x as z)
import le as x
def x(t=x):pass
""",
[
ImportFrom(
lineno=1,
column=1,
name="z",
package="x",
star=False,
suggestions=[],
),
Import(
lineno=2,
column=1,
name="x",
package="x",
),
ImportFrom(
lineno=3,
column=1,
name="ss",
package="t",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=1,
name="c",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=2,
name="k",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=3,
name="ii",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=1,
name="bar",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=2,
name="i",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=3,
name="z",
package="fo",
star=False,
suggestions=[],
),
],
)
| 29.311111
| 60
| 0.288855
|
from tests.analyzer.utils import UnusedTestCase
from unimport.statement import Import, ImportFrom
class AsImportTestCase(UnusedTestCase):
def test_as_import_all_unused_all_cases(self):
self.assertSourceAfterScanningEqualToExpected(
"""\
from x import y as z
import x
from t import s as ss
from f import a as c, l as k, i as ii
from fo import (bar, i, x as z)
import le as x
""",
[
ImportFrom(
lineno=1,
column=1,
name="z",
package="x",
star=False,
suggestions=[],
),
Import(
lineno=2,
column=1,
name="x",
package="x",
),
ImportFrom(
lineno=3,
column=1,
name="ss",
package="t",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=1,
name="c",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=2,
name="k",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=3,
name="ii",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=1,
name="bar",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=2,
name="i",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=3,
name="z",
package="fo",
star=False,
suggestions=[],
),
Import(
lineno=6,
column=1,
name="x",
package="le",
),
],
)
def test_as_import_one_used_in_function_all_cases(self):
self.assertSourceAfterScanningEqualToExpected(
"""\
from x import y as z
import x
from t import s as ss
from f import a as c, l as k, i as ii
from fo import (bar, i, x as z)
import le as x
def x(t=x):pass
""",
[
ImportFrom(
lineno=1,
column=1,
name="z",
package="x",
star=False,
suggestions=[],
),
Import(
lineno=2,
column=1,
name="x",
package="x",
),
ImportFrom(
lineno=3,
column=1,
name="ss",
package="t",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=1,
name="c",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=2,
name="k",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=4,
column=3,
name="ii",
package="f",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=1,
name="bar",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=2,
name="i",
package="fo",
star=False,
suggestions=[],
),
ImportFrom(
lineno=5,
column=3,
name="z",
package="fo",
star=False,
suggestions=[],
),
],
)
| true
| true
|
f7016f34f904942762b689e2683eb61c1e3e3a29
| 2,470
|
py
|
Python
|
modules/Google_Takeout/modules/utils/takeout_case.py
|
dfrc-korea/carpe
|
9afa8b624948fd462bb19c90cbc811228fd52c21
|
[
"Apache-2.0"
] | 56
|
2019-02-07T06:21:45.000Z
|
2022-03-21T08:19:24.000Z
|
modules/Google_Takeout/modules/utils/takeout_case.py
|
dfrc-korea/carpe
|
9afa8b624948fd462bb19c90cbc811228fd52c21
|
[
"Apache-2.0"
] | 5
|
2020-05-25T17:29:00.000Z
|
2021-12-13T20:49:08.000Z
|
modules/Google_Takeout/modules/utils/takeout_case.py
|
dfrc-korea/carpe
|
9afa8b624948fd462bb19c90cbc811228fd52c21
|
[
"Apache-2.0"
] | 31
|
2019-03-13T10:23:49.000Z
|
2021-11-04T12:14:58.000Z
|
import os
from .takeout_sqlite3 import SQLite3
import multiprocessing
CONTACTS = 'Contacts' + os.sep + 'All Contacts' + os.sep + 'All Contacts.vcf'
DRIVE = 'Drive'
MY_ACTIVITY_ASSISTANT_PATH = 'My Activity' + os.sep + 'Assistant' + os.sep + 'MyActivity.html'
MY_ACTIVITY_GMAIL_PATH = 'My Activity' + os.sep + 'Gmail' + os.sep + 'MyActivity.html'
MY_ACTIVITY_GOOGLE_ANALYTICS_PATH = 'My Activity' + os.sep + 'Google Analytics' + os.sep + 'MyActivity.html'
MY_ACTIVITY_YOUTUBE_PATH = 'My Activity' + os.sep + 'YouTube' + os.sep + 'MyActivity.html'
MY_ACTIVITY_VIDEO_SEARCH_PATH = 'My Activity' + os.sep + 'Video Search' + os.sep + 'MyActivity.html'
MY_ACTIVITY_VOICE_AUDIO_PATH = 'My Activity' + os.sep + 'Voice and Audio' + os.sep + 'MyActivity.html'
MY_ACTIVITY_MAPS_PATH = 'My Activity' + os.sep + 'Maps' + os.sep + 'MyActivity.html'
MY_ACTIVITY_ANDROID_PATH = 'My Activity' + os.sep + 'Android' + os.sep + 'MyActivity.html'
MY_ACTIVITY_CHROME_PATH = 'My Activity' + os.sep + 'Chrome' + os.sep + 'MyActivity.html'
class Case(object):
def __init__(self, input_dir):
self.number_of_system_processes = 1
self.number_of_input_processes = 1
self.input_dir_path = input_dir
self.set_file_path()
def set_file_path(self):
if self.input_dir_path[-1] == os.sep:
self.input_dir_path = self.input_dir_path[:-1]
self.takeout_path = self.input_dir_path + os.sep + 'Takeout'
if not os.path.exists(self.takeout_path):
return False
self.takeout_contacts_path = self.takeout_path + os.sep + CONTACTS
self.takeout_drive_path = self.takeout_path + os.sep + DRIVE
self.takeout_my_activity_assistant_path = self.takeout_path + os.sep + MY_ACTIVITY_ASSISTANT_PATH
self.takeout_my_activity_gmail_path = self.takeout_path + os.sep + MY_ACTIVITY_GMAIL_PATH
self.takeout_my_activity_google_analytics_path = self.takeout_path + os.sep + MY_ACTIVITY_GOOGLE_ANALYTICS_PATH
self.takeout_my_activity_youtube_path = self.takeout_path + os.sep + MY_ACTIVITY_YOUTUBE_PATH
self.takeout_my_activity_video_search_path = self.takeout_path + os.sep + MY_ACTIVITY_VIDEO_SEARCH_PATH
self.takeout_my_activity_voice_audio_path = self.takeout_path + os.sep + MY_ACTIVITY_VOICE_AUDIO_PATH
self.takeout_my_activity_maps_path = self.takeout_path + os.sep + MY_ACTIVITY_MAPS_PATH
self.takeout_my_activity_android_path = self.takeout_path + os.sep + MY_ACTIVITY_ANDROID_PATH
self.takeout_my_activity_chrome_path = self.takeout_path + os.sep + MY_ACTIVITY_CHROME_PATH
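# Hedged usage sketch (not part of the original module): the path below is a
# hypothetical directory that contains an extracted "Takeout" folder.
if __name__ == "__main__":
    case = Case("/evidence/google_takeout_export")
    print(case.takeout_my_activity_chrome_path)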
| 53.695652
| 113
| 0.773684
|
import os
from .takeout_sqlite3 import SQLite3
import multiprocessing
CONTACTS = 'Contacts' + os.sep + 'All Contacts' + os.sep + 'All Contacts.vcf'
DRIVE = 'Drive'
MY_ACTIVITY_ASSISTANT_PATH = 'My Activity' + os.sep + 'Assistant' + os.sep + 'MyActivity.html'
MY_ACTIVITY_GMAIL_PATH = 'My Activity' + os.sep + 'Gmail' + os.sep + 'MyActivity.html'
MY_ACTIVITY_GOOGLE_ANALYTICS_PATH = 'My Activity' + os.sep + 'Google Analytics' + os.sep + 'MyActivity.html'
MY_ACTIVITY_YOUTUBE_PATH = 'My Activity' + os.sep + 'YouTube' + os.sep + 'MyActivity.html'
MY_ACTIVITY_VIDEO_SEARCH_PATH = 'My Activity' + os.sep + 'Video Search' + os.sep + 'MyActivity.html'
MY_ACTIVITY_VOICE_AUDIO_PATH = 'My Activity' + os.sep + 'Voice and Audio' + os.sep + 'MyActivity.html'
MY_ACTIVITY_MAPS_PATH = 'My Activity' + os.sep + 'Maps' + os.sep + 'MyActivity.html'
MY_ACTIVITY_ANDROID_PATH = 'My Activity' + os.sep + 'Android' + os.sep + 'MyActivity.html'
MY_ACTIVITY_CHROME_PATH = 'My Activity' + os.sep + 'Chrome' + os.sep + 'MyActivity.html'
class Case(object):
def __init__(self, input_dir):
self.number_of_system_processes = 1
self.number_of_input_processes = 1
self.input_dir_path = input_dir
self.set_file_path()
def set_file_path(self):
if self.input_dir_path[-1] == os.sep:
self.input_dir_path = self.input_dir_path[:-1]
self.takeout_path = self.input_dir_path + os.sep + 'Takeout'
if not os.path.exists(self.takeout_path):
return False
self.takeout_contacts_path = self.takeout_path + os.sep + CONTACTS
self.takeout_drive_path = self.takeout_path + os.sep + DRIVE
self.takeout_my_activity_assistant_path = self.takeout_path + os.sep + MY_ACTIVITY_ASSISTANT_PATH
self.takeout_my_activity_gmail_path = self.takeout_path + os.sep + MY_ACTIVITY_GMAIL_PATH
self.takeout_my_activity_google_analytics_path = self.takeout_path + os.sep + MY_ACTIVITY_GOOGLE_ANALYTICS_PATH
self.takeout_my_activity_youtube_path = self.takeout_path + os.sep + MY_ACTIVITY_YOUTUBE_PATH
self.takeout_my_activity_video_search_path = self.takeout_path + os.sep + MY_ACTIVITY_VIDEO_SEARCH_PATH
self.takeout_my_activity_voice_audio_path = self.takeout_path + os.sep + MY_ACTIVITY_VOICE_AUDIO_PATH
self.takeout_my_activity_maps_path = self.takeout_path + os.sep + MY_ACTIVITY_MAPS_PATH
self.takeout_my_activity_android_path = self.takeout_path + os.sep + MY_ACTIVITY_ANDROID_PATH
self.takeout_my_activity_chrome_path = self.takeout_path + os.sep + MY_ACTIVITY_CHROME_PATH
| true
| true
|
f7016f7ed1a12ccf8abe398fe979b5e78fb5499f
| 2,938
|
py
|
Python
|
gym_puyopuyo/test-feedforward-smallenv.py
|
brnor/dipl
|
db516610aecffb10825e899fb5aa9f2902093b6e
|
[
"MIT"
] | null | null | null |
gym_puyopuyo/test-feedforward-smallenv.py
|
brnor/dipl
|
db516610aecffb10825e899fb5aa9f2902093b6e
|
[
"MIT"
] | null | null | null |
gym_puyopuyo/test-feedforward-smallenv.py
|
brnor/dipl
|
db516610aecffb10825e899fb5aa9f2902093b6e
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os
import pickle
import time
from gym_puyopuyo import register
import gym
import numpy as np
import neat
import visualize
piece_shape = (3, 2)
DRAW_NETS = False
NUM_COLORS = 3.0 # 3 colors in the small env mode
# TODO: could probably read color number from observation data
fn_results = "feedforward-small"
def multiplyMatrices(pieces, field, norm = True):
pieces = pieces.astype(np.float64)
field = field.astype(np.float64)
pieces_sum = np.zeros(piece_shape)
field_sum = np.zeros(field[0].shape)
for i in range(0, len(pieces)):
pieces[i] = np.multiply(pieces[i], i + 1)
if(norm):
pieces[i] /= NUM_COLORS
pieces_sum += pieces[i]
for i in range(0, len(field)):
field[i] = np.multiply(field[i], i + 1)
if(norm):
field[i] /= NUM_COLORS
field_sum += field[i]
return pieces_sum, field_sum
def run():
with open("results/winner-pickle-"+fn_results, 'rb') as f:
c = pickle.load(f)
print('loaded genome:')
print(c)
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward-small')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
net = neat.nn.FeedForwardNetwork.create(c, config)
register()
env = gym.make("PuyoPuyoEndlessSmall-v2")
done = False
ob = env.reset()
count = 0
total_reward = 0
while True:
env.render()
#input()
time.sleep(0.5)
pieces_sum, field_sum = multiplyMatrices(ob[0], ob[1])
next_piece = pieces_sum[0]
inp_piece = np.ndarray.flatten(next_piece)
inp_field = np.ndarray.flatten(field_sum)
inputs = np.hstack([inp_piece, inp_field])
nn_output = net.activate(inputs)
action = np.argmax(nn_output)
#print(nn_output)
#nn_output = int(round(nn_output[0] * NUM_ACTIONS))
#print(nn_output)
#input()
ob, rew, done, info = env.step(action)
total_reward += rew
count += 1
if done:
break
print("Game played for ", count, " turns.")
print("Total score: ", total_reward)
if DRAW_NETS:
visualize.draw_net(config, c, view=True,
filename="results/winner-"+fn_results+".net")
visualize.draw_net(config, c, view=True,
filename="results/winner-"+fn_results+"-enabled.net",
show_disabled=False)
visualize.draw_net(config, c, view=True,
filename="results/winner-"+fn_results+"-pruned.net",
show_disabled=False, prune_unused=True)
if __name__ == '__main__':
run()
| 28.803922
| 77
| 0.591559
|
from __future__ import print_function
import os
import pickle
import time
from gym_puyopuyo import register
import gym
import numpy as np
import neat
import visualize
piece_shape = (3, 2)
DRAW_NETS = False
NUM_COLORS = 3.0
fn_results = "feedforward-small"
def multiplyMatrices(pieces, field, norm = True):
pieces = pieces.astype(np.float64)
field = field.astype(np.float64)
pieces_sum = np.zeros(piece_shape)
field_sum = np.zeros(field[0].shape)
for i in range(0, len(pieces)):
pieces[i] = np.multiply(pieces[i], i + 1)
if(norm):
pieces[i] /= NUM_COLORS
pieces_sum += pieces[i]
for i in range(0, len(field)):
field[i] = np.multiply(field[i], i + 1)
if(norm):
field[i] /= NUM_COLORS
field_sum += field[i]
return pieces_sum, field_sum
def run():
with open("results/winner-pickle-"+fn_results, 'rb') as f:
c = pickle.load(f)
print('loaded genome:')
print(c)
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward-small')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
net = neat.nn.FeedForwardNetwork.create(c, config)
register()
env = gym.make("PuyoPuyoEndlessSmall-v2")
done = False
ob = env.reset()
count = 0
total_reward = 0
while True:
env.render()
time.sleep(0.5)
pieces_sum, field_sum = multiplyMatrices(ob[0], ob[1])
next_piece = pieces_sum[0]
inp_piece = np.ndarray.flatten(next_piece)
inp_field = np.ndarray.flatten(field_sum)
inputs = np.hstack([inp_piece, inp_field])
nn_output = net.activate(inputs)
action = np.argmax(nn_output)
ob, rew, done, info = env.step(action)
total_reward += rew
count += 1
if done:
break
print("Game played for ", count, " turns.")
print("Total score: ", total_reward)
if DRAW_NETS:
visualize.draw_net(config, c, view=True,
filename="results/winner-"+fn_results+".net")
visualize.draw_net(config, c, view=True,
filename="results/winner-"+fn_results+"-enabled.net",
show_disabled=False)
visualize.draw_net(config, c, view=True,
filename="results/winner-"+fn_results+"-pruned.net",
show_disabled=False, prune_unused=True)
if __name__ == '__main__':
run()
| true
| true
|
f7016fdcb57fb610e943f783ffeb624675e41126
| 2,021
|
py
|
Python
|
app/dao/broadcast_message_dao.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | null | null | null |
app/dao/broadcast_message_dao.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | null | null | null |
app/dao/broadcast_message_dao.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | null | null | null |
import uuid
from app import db
from app.dao.dao_utils import transactional
from app.models import (
BroadcastMessage,
BroadcastEvent,
BroadcastProvider,
BroadcastProviderMessage,
BroadcastProviderMessageNumber,
BroadcastProviderMessageStatus
)
def dao_get_broadcast_message_by_id_and_service_id(broadcast_message_id, service_id):
return BroadcastMessage.query.filter(
BroadcastMessage.id == broadcast_message_id,
BroadcastMessage.service_id == service_id
).one()
def dao_get_broadcast_event_by_id(broadcast_event_id):
return BroadcastEvent.query.filter(BroadcastEvent.id == broadcast_event_id).one()
def dao_get_broadcast_messages_for_service(service_id):
return BroadcastMessage.query.filter(
BroadcastMessage.service_id == service_id
).order_by(BroadcastMessage.created_at)
def get_earlier_events_for_broadcast_event(broadcast_event_id):
"""
This is used to build up the references list.
"""
this_event = BroadcastEvent.query.get(broadcast_event_id)
return BroadcastEvent.query.filter(
BroadcastEvent.broadcast_message_id == this_event.broadcast_message_id,
BroadcastEvent.sent_at < this_event.sent_at
).order_by(
BroadcastEvent.sent_at.asc()
).all()
@transactional
def create_broadcast_provider_message(broadcast_event, provider):
broadcast_provider_message_id = uuid.uuid4()
provider_message = BroadcastProviderMessage(
id=broadcast_provider_message_id,
broadcast_event=broadcast_event,
provider=provider,
status=BroadcastProviderMessageStatus.SENDING,
)
db.session.add(provider_message)
db.session.commit()
provider_message_number = None
if provider == BroadcastProvider.VODAFONE:
provider_message_number = BroadcastProviderMessageNumber(
broadcast_provider_message_id=broadcast_provider_message_id)
db.session.add(provider_message_number)
db.session.commit()
return provider_message
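# Hedged usage sketch (not part of the original module): these DAO helpers
# need a Flask app context and a populated database, so the example is shown
# as comments only. `app` and `broadcast_event_id` are hypothetical.
#
#   with app.app_context():
#       event = dao_get_broadcast_event_by_id(broadcast_event_id)
#       message = create_broadcast_provider_message(
#           event, BroadcastProvider.VODAFONE)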
| 31.578125
| 85
| 0.764473
|
import uuid
from app import db
from app.dao.dao_utils import transactional
from app.models import (
BroadcastMessage,
BroadcastEvent,
BroadcastProvider,
BroadcastProviderMessage,
BroadcastProviderMessageNumber,
BroadcastProviderMessageStatus
)
def dao_get_broadcast_message_by_id_and_service_id(broadcast_message_id, service_id):
return BroadcastMessage.query.filter(
BroadcastMessage.id == broadcast_message_id,
BroadcastMessage.service_id == service_id
).one()
def dao_get_broadcast_event_by_id(broadcast_event_id):
return BroadcastEvent.query.filter(BroadcastEvent.id == broadcast_event_id).one()
def dao_get_broadcast_messages_for_service(service_id):
return BroadcastMessage.query.filter(
BroadcastMessage.service_id == service_id
).order_by(BroadcastMessage.created_at)
def get_earlier_events_for_broadcast_event(broadcast_event_id):
this_event = BroadcastEvent.query.get(broadcast_event_id)
return BroadcastEvent.query.filter(
BroadcastEvent.broadcast_message_id == this_event.broadcast_message_id,
BroadcastEvent.sent_at < this_event.sent_at
).order_by(
BroadcastEvent.sent_at.asc()
).all()
@transactional
def create_broadcast_provider_message(broadcast_event, provider):
broadcast_provider_message_id = uuid.uuid4()
provider_message = BroadcastProviderMessage(
id=broadcast_provider_message_id,
broadcast_event=broadcast_event,
provider=provider,
status=BroadcastProviderMessageStatus.SENDING,
)
db.session.add(provider_message)
db.session.commit()
provider_message_number = None
if provider == BroadcastProvider.VODAFONE:
provider_message_number = BroadcastProviderMessageNumber(
broadcast_provider_message_id=broadcast_provider_message_id)
db.session.add(provider_message_number)
db.session.commit()
return provider_message
| true
| true
|
f7016fe4e649e0defe4ee246bb7a91d3d39ef94e
| 1,706
|
py
|
Python
|
python/open3d/ml/torch/pipelines.py
|
xkaraman/Open3D
|
a1d65eca537a2b099fc3b6d08edb26e45b717e40
|
[
"MIT"
] | 2
|
2020-12-08T15:38:34.000Z
|
2021-04-10T02:58:15.000Z
|
python/open3d/ml/torch/pipelines.py
|
moonwonlee/Open3D
|
dda9b3a0129fa6c60f913672a70ff02483dcd0f3
|
[
"MIT"
] | null | null | null |
python/open3d/ml/torch/pipelines.py
|
moonwonlee/Open3D
|
dda9b3a0129fa6c60f913672a70ff02483dcd0f3
|
[
"MIT"
] | 1
|
2021-11-05T01:16:13.000Z
|
2021-11-05T01:16:13.000Z
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2020 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
"""
3D ML pipelines for PyTorch.
"""
import os as _os
from open3d import _build_config
if _build_config['BUNDLE_OPEN3D_ML']:
if 'OPEN3D_ML_ROOT' in _os.environ:
from ml3d.torch.pipelines import *
else:
from open3d._ml3d.torch.pipelines import *
| 44.894737
| 79
| 0.631301
|
import os as _os
from open3d import _build_config
if _build_config['BUNDLE_OPEN3D_ML']:
if 'OPEN3D_ML_ROOT' in _os.environ:
from ml3d.torch.pipelines import *
else:
from open3d._ml3d.torch.pipelines import *
| true
| true
|
f7016fee3d19733d380e1f4199f8769b9529e54f
| 722
|
py
|
Python
|
{{cookiecutter.project_name}}/core/deployment/gunicorn_conf.py
|
ProjectTemplates/django-webpack-app
|
50081d009162503042840a904e25c4b32d606cf5
|
[
"MIT"
] | 2
|
2020-11-10T10:16:48.000Z
|
2021-02-05T13:21:11.000Z
|
{{cookiecutter.project_name}}/core/deployment/gunicorn_conf.py
|
ProjectTemplates/django-webpack-app
|
50081d009162503042840a904e25c4b32d606cf5
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/core/deployment/gunicorn_conf.py
|
ProjectTemplates/django-webpack-app
|
50081d009162503042840a904e25c4b32d606cf5
|
[
"MIT"
] | 1
|
2021-02-05T11:37:18.000Z
|
2021-02-05T11:37:18.000Z
|
import os
def to_bool(value):
return (
value is True or
(isinstance(value, str) and value.lower() in ['true', 'yes']) or
(isinstance(value, (int, float)) and value > 0)
)
bind = '0.0.0.0:{}'.format(os.getenv('GUNICORN_PORT', '8000'))
max_requests = int(os.getenv('GUNICORN_MAX_REQUESTS', '10000'))
max_requests_jitter = int(os.getenv('GUNICORN_MAX_REQUESTS_JITTER', '100'))
user = os.getenv('GUNICORN_USER', 'root')
keepalive = int(os.getenv('GUNICORN_KEEPALIVE', '70'))
reuse_port = to_bool(os.getenv('GUNICORN_REUSE_PORT', True))
accesslog = '-'
errorlog = '-'
print_config = True
workers = int(os.getenv('GUNICORN_WORKERS', '5'))
threads = int(os.getenv('GUNICORN_THREADS', '5'))
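# Hedged usage note (not part of the original file): gunicorn picks this
# module up via its --config/-c flag, e.g.
#
#   GUNICORN_PORT=9000 GUNICORN_WORKERS=2 gunicorn -c gunicorn_conf.py myproject.wsgi
#
# where "myproject.wsgi" is a hypothetical WSGI entry point.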
| 27.769231
| 75
| 0.671745
|
import os
def to_bool(value):
return (
value is True or
(isinstance(value, str) and value.lower() in ['true', 'yes']) or
(isinstance(value, (int, float)) and value > 0)
)
bind = '0.0.0.0:{}'.format(os.getenv('GUNICORN_PORT', '8000'))
max_requests = int(os.getenv('GUNICORN_MAX_REQUESTS', '10000'))
max_requests_jitter = int(os.getenv('GUNICORN_MAX_REQUESTS_JITTER', '100'))
user = os.getenv('GUNICORN_USER', 'root')
keepalive = int(os.getenv('GUNICORN_KEEPALIVE', '70'))
reuse_port = to_bool(os.getenv('GUNICORN_REUSE_PORT', True))
accesslog = '-'
errorlog = '-'
print_config = True
workers = int(os.getenv('GUNICORN_WORKERS', '5'))
threads = int(os.getenv('GUNICORN_THREADS', '5'))
| true
| true
|
f701701a180004073de4f335fee9153105be7e42
| 584
|
py
|
Python
|
tests/test_pin_num_name.py
|
arjenroodselaar/skidl
|
0bf801bd3b74e6ef94bd9aa1b68eef756b568276
|
[
"MIT"
] | 2
|
2022-02-27T14:31:52.000Z
|
2022-02-27T14:31:56.000Z
|
tests/test_pin_num_name.py
|
arjenroodselaar/skidl
|
0bf801bd3b74e6ef94bd9aa1b68eef756b568276
|
[
"MIT"
] | null | null | null |
tests/test_pin_num_name.py
|
arjenroodselaar/skidl
|
0bf801bd3b74e6ef94bd9aa1b68eef756b568276
|
[
"MIT"
] | 1
|
2020-09-21T23:31:41.000Z
|
2020-09-21T23:31:41.000Z
|
import pytest
from skidl import *
from .setup_teardown import *
def test_pin_names_1():
codec = Part("xess.lib", "ak4520a")
assert codec["ain"] == codec.n["ain"]
assert codec[1:4] == codec.p[1:4]
def test_pin_names_2():
codec = Part("xess.lib", "ak4520a")
codec[4].name = "A1"
codec[8].name = "A2"
codec[8].num = "A1"
assert codec[4] is codec.n["A1"]
assert codec.p[4] is codec.n["A1"]
assert codec[4] is codec.p[4]
assert codec.p["A1"] is codec.n["A2"]
assert codec["A1"] is codec.n["A2"]
assert codec["A1"] is codec.p["A1"]
| 23.36
| 41
| 0.599315
|
import pytest
from skidl import *
from .setup_teardown import *
def test_pin_names_1():
codec = Part("xess.lib", "ak4520a")
assert codec["ain"] == codec.n["ain"]
assert codec[1:4] == codec.p[1:4]
def test_pin_names_2():
codec = Part("xess.lib", "ak4520a")
codec[4].name = "A1"
codec[8].name = "A2"
codec[8].num = "A1"
assert codec[4] is codec.n["A1"]
assert codec.p[4] is codec.n["A1"]
assert codec[4] is codec.p[4]
assert codec.p["A1"] is codec.n["A2"]
assert codec["A1"] is codec.n["A2"]
assert codec["A1"] is codec.p["A1"]
| true
| true
|
f701701fc0217b14ad2a52b62b79d747b1021bdd
| 5,395
|
py
|
Python
|
asynction/types.py
|
pedrohbtp/asynction
|
939f9b3fdf09f8bf89da9a8c5222234c07594d22
|
[
"MIT"
] | null | null | null |
asynction/types.py
|
pedrohbtp/asynction
|
939f9b3fdf09f8bf89da9a8c5222234c07594d22
|
[
"MIT"
] | null | null | null |
asynction/types.py
|
pedrohbtp/asynction
|
939f9b3fdf09f8bf89da9a8c5222234c07594d22
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from dataclasses import field
from typing import Any
from typing import Callable
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Type
from svarog import forge
from svarog import register_forge
from svarog.types import Forge
JSONMappingValue = Any
JSONMapping = Mapping[str, JSONMappingValue]
JSONSchema = JSONMapping
GLOBAL_NAMESPACE = "/"
@dataclass
class MessageAck:
"""The specification of a message acknowledgement"""
args: JSONSchema
@dataclass
class Message:
"""
https://www.asyncapi.com/docs/specifications/2.0.0#messageObject
The above message object is extended as follows:
* `x-handler`: Allows the coupling of the message specification to
an event handler (which is a python callable). It SHOULD only be used
for messages under a `publish` operation. Deserialized to `x_handler`.
* `x-ack`: The specification of the acknowledgement packet that the message receiver
transmits to the message sender. The acknowledgement args are passed as an input
to the callback of the `emit`/`send` function. Deserialized to `x_ack`.
The extensions are implemented as per:
https://www.asyncapi.com/docs/specifications/2.0.0#specificationExtensions
"""
name: str
payload: Optional[JSONSchema] = None
x_handler: Optional[str] = None
x_ack: Optional[MessageAck] = None
@staticmethod
def forge(type_: Type["Message"], data: JSONMapping, forge: Forge) -> "Message":
return type_(
name=forge(type_.__annotations__["name"], data["name"]),
payload=forge(type_.__annotations__["payload"], data.get("payload")),
x_handler=forge(type_.__annotations__["x_handler"], data.get("x-handler")),
x_ack=forge(type_.__annotations__["x_ack"], data.get("x-ack")),
)
register_forge(Message, Message.forge)
@dataclass
class OneOfMessages:
"""Using `oneOf` to specify multiple messages per operation"""
oneOf: Sequence[Message]
@staticmethod
def forge(
type_: Type["OneOfMessages"], data: JSONMapping, forge: Forge
) -> "OneOfMessages":
if "oneOf" in data:
return type_(
oneOf=forge(type_.__annotations__["oneOf"], data["oneOf"]),
)
return type_(oneOf=[forge(Message, data)])
def with_name(self, name: str) -> Optional[Message]:
for message in self.oneOf:
if message.name == name:
return message
return None
register_forge(OneOfMessages, OneOfMessages.forge)
@dataclass
class Operation:
"""https://www.asyncapi.com/docs/specifications/2.0.0#operationObject"""
message: OneOfMessages
@dataclass
class WebSocketsChannelBindings:
"""
https://github.com/asyncapi/bindings/tree/master/websockets#channel-binding-object
"""
method: Optional[str] = None
query: Optional[JSONSchema] = None
headers: Optional[JSONSchema] = None # TODO: Convert header properties to lowercase
bindingVersion: str = "latest"
@dataclass
class ChannelBindings:
"""https://www.asyncapi.com/docs/specifications/2.0.0#channelBindingsObject"""
ws: WebSocketsChannelBindings
@dataclass
class ChannelHandlers:
connect: Optional[str] = None
disconnect: Optional[str] = None
error: Optional[str] = None
@dataclass
class Channel:
"""
https://www.asyncapi.com/docs/specifications/2.0.0#channelItemObject
The above channel item object is extended to
support default namespace handlers as per:
https://www.asyncapi.com/docs/specifications/2.0.0#specificationExtensions
The `x_handlers` field is deserialized from the `x-handlers` key.
"""
subscribe: Optional[Operation] = None
publish: Optional[Operation] = None
bindings: Optional[ChannelBindings] = None
x_handlers: Optional[ChannelHandlers] = None
def __post_init__(self):
if self.publish is not None:
for message in self.publish.message.oneOf:
if message.x_handler is None:
raise ValueError(
f"Message {message.name} is missing the x-handler attribute.\n"
"Every message under a publish operation "
"should have a handler defined."
)
@staticmethod
def forge(type_: Type["Channel"], data: JSONMapping, forge: Forge) -> "Channel":
return type_(
subscribe=forge(type_.__annotations__["subscribe"], data.get("subscribe")),
publish=forge(type_.__annotations__["publish"], data.get("publish")),
bindings=forge(type_.__annotations__["bindings"], data.get("bindings")),
x_handlers=forge(
type_.__annotations__["x_handlers"], data.get("x-handlers")
),
)
register_forge(Channel, Channel.forge)
@dataclass
class Server:
"""https://www.asyncapi.com/docs/specifications/2.0.0#serverObject"""
url: str
@dataclass
class AsyncApiSpec:
"""https://www.asyncapi.com/docs/specifications/2.0.0#A2SObject"""
channels: Mapping[str, Channel]
servers: Mapping[str, Server] = field(default_factory=dict)
@staticmethod
def from_dict(data: JSONMapping) -> "AsyncApiSpec":
return forge(AsyncApiSpec, data)
ErrorHandler = Callable[[Exception], None]
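# Hedged usage sketch (not part of the original module): forge a spec from a
# plain mapping, as a YAML loader would produce. The channel, message name and
# handler path are illustrative.
if __name__ == "__main__":
    raw = {
        "channels": {
            "/": {
                "publish": {
                    "message": {"name": "ping", "x-handler": "handlers.ping"}
                }
            }
        }
    }
    spec = AsyncApiSpec.from_dict(raw)
    print(spec.channels["/"].publish.message.oneOf[0].name)  # -> ping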
| 29.005376
| 88
| 0.67785
|
from dataclasses import dataclass
from dataclasses import field
from typing import Any
from typing import Callable
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Type
from svarog import forge
from svarog import register_forge
from svarog.types import Forge
JSONMappingValue = Any
JSONMapping = Mapping[str, JSONMappingValue]
JSONSchema = JSONMapping
GLOBAL_NAMESPACE = "/"
@dataclass
class MessageAck:
args: JSONSchema
@dataclass
class Message:
name: str
payload: Optional[JSONSchema] = None
x_handler: Optional[str] = None
x_ack: Optional[MessageAck] = None
@staticmethod
def forge(type_: Type["Message"], data: JSONMapping, forge: Forge) -> "Message":
return type_(
name=forge(type_.__annotations__["name"], data["name"]),
payload=forge(type_.__annotations__["payload"], data.get("payload")),
x_handler=forge(type_.__annotations__["x_handler"], data.get("x-handler")),
x_ack=forge(type_.__annotations__["x_ack"], data.get("x-ack")),
)
register_forge(Message, Message.forge)
@dataclass
class OneOfMessages:
oneOf: Sequence[Message]
@staticmethod
def forge(
type_: Type["OneOfMessages"], data: JSONMapping, forge: Forge
) -> "OneOfMessages":
if "oneOf" in data:
return type_(
oneOf=forge(type_.__annotations__["oneOf"], data["oneOf"]),
)
return type_(oneOf=[forge(Message, data)])
def with_name(self, name: str) -> Optional[Message]:
for message in self.oneOf:
if message.name == name:
return message
return None
register_forge(OneOfMessages, OneOfMessages.forge)
@dataclass
class Operation:
message: OneOfMessages
@dataclass
class WebSocketsChannelBindings:
method: Optional[str] = None
query: Optional[JSONSchema] = None
headers: Optional[JSONSchema] = None
bindingVersion: str = "latest"
@dataclass
class ChannelBindings:
ws: WebSocketsChannelBindings
@dataclass
class ChannelHandlers:
connect: Optional[str] = None
disconnect: Optional[str] = None
error: Optional[str] = None
@dataclass
class Channel:
subscribe: Optional[Operation] = None
publish: Optional[Operation] = None
bindings: Optional[ChannelBindings] = None
x_handlers: Optional[ChannelHandlers] = None
def __post_init__(self):
if self.publish is not None:
for message in self.publish.message.oneOf:
if message.x_handler is None:
raise ValueError(
f"Message {message.name} is missing the x-handler attribute.\n"
"Every message under a publish operation "
"should have a handler defined."
)
@staticmethod
def forge(type_: Type["Channel"], data: JSONMapping, forge: Forge) -> "Channel":
return type_(
subscribe=forge(type_.__annotations__["subscribe"], data.get("subscribe")),
publish=forge(type_.__annotations__["publish"], data.get("publish")),
bindings=forge(type_.__annotations__["bindings"], data.get("bindings")),
x_handlers=forge(
type_.__annotations__["x_handlers"], data.get("x-handlers")
),
)
register_forge(Channel, Channel.forge)
@dataclass
class Server:
url: str
@dataclass
class AsyncApiSpec:
channels: Mapping[str, Channel]
servers: Mapping[str, Server] = field(default_factory=dict)
@staticmethod
def from_dict(data: JSONMapping) -> "AsyncApiSpec":
return forge(AsyncApiSpec, data)
ErrorHandler = Callable[[Exception], None]
| true
| true
|
f70170a2d31c03ce7897bd9581ae37e1f9d43725
| 415
|
py
|
Python
|
Equities/AHTable/ah_image.py
|
GSam/OCR-Pipelines
|
ca26d34d2e8271f014e53e4c90a58310790d2bee
|
[
"MIT"
] | null | null | null |
Equities/AHTable/ah_image.py
|
GSam/OCR-Pipelines
|
ca26d34d2e8271f014e53e4c90a58310790d2bee
|
[
"MIT"
] | null | null | null |
Equities/AHTable/ah_image.py
|
GSam/OCR-Pipelines
|
ca26d34d2e8271f014e53e4c90a58310790d2bee
|
[
"MIT"
] | null | null | null |
import subprocess
def process_image(filename, scale=1.0):
output, _ = subprocess.Popen(['./Capture2Text_CLI', '-platform',
'offscreen', '-i', filename,
'--blacklist', '~|\\V', '--scale-factor', str(scale)],
stdout=subprocess.PIPE).communicate()
# interpret output here
print output
return output
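# Example call (illustrative; assumes the Capture2Text_CLI binary sits in the
# working directory and 'screenshot.png' exists):
#   text = process_image('screenshot.png', scale=2.0)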
| 37.727273
| 88
| 0.513253
|
import subprocess
def process_image(filename, scale=1.0):
output, _ = subprocess.Popen(['./Capture2Text_CLI', '-platform',
'offscreen', '-i', filename,
'--blacklist', '~|\\V', '--scale-factor', str(scale)],
stdout=subprocess.PIPE).communicate()
print output
return output
| false
| true
|
f701720dc66511da8b41f4c16f88ff2e260dada6
| 408
|
py
|
Python
|
ia870/iaframe.py
|
rdenadai/ia870p3
|
c4823efc4b8e5f187a64f8a4e9962e328bf86967
|
[
"BSD-2-Clause"
] | 5
|
2018-10-15T12:02:03.000Z
|
2022-02-11T12:47:12.000Z
|
ia870/iaframe.py
|
rdenadai/ia870p3
|
c4823efc4b8e5f187a64f8a4e9962e328bf86967
|
[
"BSD-2-Clause"
] | 1
|
2018-10-15T12:04:36.000Z
|
2019-01-25T12:04:35.000Z
|
ia870/iaframe.py
|
rdenadai/ia870p3
|
c4823efc4b8e5f187a64f8a4e9962e328bf86967
|
[
"BSD-2-Clause"
] | 4
|
2019-01-25T11:13:48.000Z
|
2020-12-20T01:42:33.000Z
|
# -*- encoding: utf-8 -*-
# Module iaframe
from numpy import *
def iaframe(f, WT=1, HT=1, DT=0, k1=None, k2=None):
    from ia870 import iaunion, iaintersec, ialimits
if k1 is None: k1 = ialimits(f)[1]
if k2 is None: k2 = ialimits(f)[0]
assert len(f.shape)==2,'Supports 2D only'
y = iaintersec(f,k2)
y[:,0:WT] = k1
y[:,-WT:] = k1
y[0:HT,:] = k1
y[-HT:,:] = k1
return y
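# Example (illustrative): add a one-pixel frame of the maximum gray level to
# a 5x5 uint8 image, e.g.
#   from numpy import zeros, uint8
#   framed = iaframe(zeros((5, 5), uint8), WT=1, HT=1)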
| 21.473684
| 51
| 0.561275
|
from numpy import *
def iaframe(f, WT=1, HT=1, DT=0, k1=None, k2=None):
    from ia870 import iaunion, iaintersec, ialimits
if k1 is None: k1 = ialimits(f)[1]
if k2 is None: k2 = ialimits(f)[0]
assert len(f.shape)==2,'Supports 2D only'
y = iaintersec(f,k2)
y[:,0:WT] = k1
y[:,-WT:] = k1
y[0:HT,:] = k1
y[-HT:,:] = k1
return y
| true
| true
|
f701723f59060b992caf1d8a7d57f53a6176b3ab
| 731
|
py
|
Python
|
aula4-factory/lifetime.py
|
eduardofagnoni/curso-flask
|
65e7d78380910a65f3c18bdda7604875c38d4e6b
|
[
"Unlicense"
] | null | null | null |
aula4-factory/lifetime.py
|
eduardofagnoni/curso-flask
|
65e7d78380910a65f3c18bdda7604875c38d4e6b
|
[
"Unlicense"
] | null | null | null |
aula4-factory/lifetime.py
|
eduardofagnoni/curso-flask
|
65e7d78380910a65f3c18bdda7604875c38d4e6b
|
[
"Unlicense"
] | null | null | null |
# contexts
from flask import Flask
import flask
app = Flask(__name__)
## 1 Configuration
### Add configuration
app.config["DEBUG"] = True
app.config["SQLALCHEMY_DB_URI"] = "mysql://"
### Register routes
@app.route("/path")
def funcao():
    pass
# or
app.add_url_rule("/path", funcao)
### Initialize extensions
#from flask_admin import Admin
#Admin.init_app(app)
### Register blueprints
app.register_blueprint(...)
### add hooks
@app.before_request(...)
@app.errorhandler(...)
### Call other factories
#views.init_app(app)
## 2 App Context
### The app is ready! 'app'
### Testing
#app.test_client
# debug
# flask global objects
# (request, session, g)
#- Hooks
## 3 Request Context
### use flask globals
| 44
| 0.689466
|
from flask import Flask
import flask
app = Flask(__name__)
app.config["DEBUG"] = True
app.config["SQLALCHEMY_DB_URI"] = "mysql://"
@app.route("/path")
def funcao():
pass
app.add_url_rule("/path", funcao)
app.register_blueprint(...)
@app.before_request(...)
@app.errorhandler(...)
| false
| true
|
f7017311d918b6323937deb442372097cda94beb
| 3,713
|
py
|
Python
|
tests/common.py
|
lucmichalski/saraki
|
74c11f70b4e7bdedfd33984cb96944c27a4eebbf
|
[
"MIT"
] | 3
|
2020-07-01T17:34:39.000Z
|
2021-05-04T17:53:01.000Z
|
tests/common.py
|
lucmichalski/saraki
|
74c11f70b4e7bdedfd33984cb96944c27a4eebbf
|
[
"MIT"
] | 25
|
2018-01-25T00:56:18.000Z
|
2021-06-12T04:29:00.000Z
|
tests/common.py
|
lucmichalski/saraki
|
74c11f70b4e7bdedfd33984cb96944c27a4eebbf
|
[
"MIT"
] | 4
|
2020-04-19T21:24:34.000Z
|
2021-01-23T19:04:27.000Z
|
import jwt
from contextlib import contextmanager
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime, Boolean
from sqlalchemy import ForeignKey, func
from sqlalchemy.orm import relationship
from saraki.auth import _request_ctx_stack, User, Org
from saraki.model import BaseModel, Model, database
class DummyBaseModel(BaseModel):
__tablename__ = "dummy_base_model"
id = Column(Integer, primary_key=True)
class DummyModel(Model):
__tablename__ = "dummy_model"
id = Column(Integer, primary_key=True)
class Person(Model):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
firstname = Column(String, nullable=False)
lastname = Column(String, nullable=False)
age = Column(Integer, nullable=False)
def export_data(self, include=("id", "firstname"), exclude=()):
return super(Person, self).export_data(include, exclude)
class Product(BaseModel):
__tablename__ = "product"
id = Column(Integer, primary_key=True)
name = Column(String(120), nullable=False)
color = Column(String, default="white")
price = Column(Integer, default=0)
created_at = Column(DateTime, nullable=False, default=func.now())
updated_at = Column(DateTime, nullable=False, server_default=func.now())
enabled = Column(Boolean, default=False)
class Order(BaseModel):
__tablename__ = "order"
id = Column(Integer, primary_key=True)
customer_id = Column(Integer, ForeignKey("person.id"), nullable=False)
lines = relationship("OrderLine")
customer = relationship("Person", uselist=False)
class OrderLine(Model):
__tablename__ = "order_line"
order_id = Column(Integer, ForeignKey("order.id"), nullable=False, primary_key=True)
product_id = Column(
Integer, ForeignKey("product.id"), nullable=False, primary_key=True
)
unit_price = Column(Integer, nullable=False)
quantity = Column(Integer, default=1, nullable=False)
product = relationship("Product", uselist=False)
def export_data(self, include=(), exclude=()):
include = tuple(include) + ("product_id", "unit_price", "quantity")
return super(OrderLine, self).export_data(include, exclude)
class Cartoon(Model):
__tablename__ = "cartoon"
id = Column(Integer, primary_key=True)
name = Column(String(80), unique=True, nullable=False)
nickname = Column(String(80), unique=True)
class Todo(Model):
__tablename__ = "todo"
id = Column(Integer, primary_key=True)
org_id = Column(Integer, ForeignKey("org.id"), nullable=False)
task = Column(String(200), nullable=False)
def login(username, orgname=None, scope=None):
iat = datetime.utcnow()
exp = iat + timedelta(seconds=6000)
payload = {"iss": "acme.local", "sub": username, "iat": iat, "exp": exp}
if orgname:
payload.update({"aud": orgname, "scp": {"org": ["manage"]}})
if scope:
payload.update({"scp": scope})
token = jwt.encode(payload, "secret").decode()
return f"JWT {token}"
@contextmanager
def auth_ctx(username, orgname=None):
_request_ctx_stack.top.current_user = User(id=1, username=username)
if orgname:
_request_ctx_stack.top.current_org = Org(id=1, orgname=orgname)
yield
def reset_secuence(table, column_name="id", schema_name="public"):
table_name = f"{schema_name}.{table.__tablename__}"
sql = f"SELECT pg_get_serial_sequence('{table_name}', '{column_name}');"
secuence_name = database.engine.execute(sql).fetchone()[0]
if secuence_name is not None:
sql = f"ALTER SEQUENCE {secuence_name} RESTART WITH 1;"
database.engine.execute(sql)
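# Example (illustrative): reset_secuence(Person) restarts person.id at 1
# between test runs so fixtures get deterministic primary keys.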
| 24.267974
| 88
| 0.693509
|
import jwt
from contextlib import contextmanager
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime, Boolean
from sqlalchemy import ForeignKey, func
from sqlalchemy.orm import relationship
from saraki.auth import _request_ctx_stack, User, Org
from saraki.model import BaseModel, Model, database
class DummyBaseModel(BaseModel):
__tablename__ = "dummy_base_model"
id = Column(Integer, primary_key=True)
class DummyModel(Model):
__tablename__ = "dummy_model"
id = Column(Integer, primary_key=True)
class Person(Model):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
firstname = Column(String, nullable=False)
lastname = Column(String, nullable=False)
age = Column(Integer, nullable=False)
def export_data(self, include=("id", "firstname"), exclude=()):
return super(Person, self).export_data(include, exclude)
class Product(BaseModel):
__tablename__ = "product"
id = Column(Integer, primary_key=True)
name = Column(String(120), nullable=False)
color = Column(String, default="white")
price = Column(Integer, default=0)
created_at = Column(DateTime, nullable=False, default=func.now())
updated_at = Column(DateTime, nullable=False, server_default=func.now())
enabled = Column(Boolean, default=False)
class Order(BaseModel):
__tablename__ = "order"
id = Column(Integer, primary_key=True)
customer_id = Column(Integer, ForeignKey("person.id"), nullable=False)
lines = relationship("OrderLine")
customer = relationship("Person", uselist=False)
class OrderLine(Model):
__tablename__ = "order_line"
order_id = Column(Integer, ForeignKey("order.id"), nullable=False, primary_key=True)
product_id = Column(
Integer, ForeignKey("product.id"), nullable=False, primary_key=True
)
unit_price = Column(Integer, nullable=False)
quantity = Column(Integer, default=1, nullable=False)
product = relationship("Product", uselist=False)
def export_data(self, include=(), exclude=()):
include = tuple(include) + ("product_id", "unit_price", "quantity")
return super(OrderLine, self).export_data(include, exclude)
class Cartoon(Model):
__tablename__ = "cartoon"
id = Column(Integer, primary_key=True)
name = Column(String(80), unique=True, nullable=False)
nickname = Column(String(80), unique=True)
class Todo(Model):
__tablename__ = "todo"
id = Column(Integer, primary_key=True)
org_id = Column(Integer, ForeignKey("org.id"), nullable=False)
task = Column(String(200), nullable=False)
def login(username, orgname=None, scope=None):
iat = datetime.utcnow()
exp = iat + timedelta(seconds=6000)
payload = {"iss": "acme.local", "sub": username, "iat": iat, "exp": exp}
if orgname:
payload.update({"aud": orgname, "scp": {"org": ["manage"]}})
if scope:
payload.update({"scp": scope})
token = jwt.encode(payload, "secret").decode()
return f"JWT {token}"
@contextmanager
def auth_ctx(username, orgname=None):
_request_ctx_stack.top.current_user = User(id=1, username=username)
if orgname:
_request_ctx_stack.top.current_org = Org(id=1, orgname=orgname)
yield
def reset_secuence(table, column_name="id", schema_name="public"):
table_name = f"{schema_name}.{table.__tablename__}"
sql = f"SELECT pg_get_serial_sequence('{table_name}', '{column_name}');"
secuence_name = database.engine.execute(sql).fetchone()[0]
if secuence_name is not None:
sql = f"ALTER SEQUENCE {secuence_name} RESTART WITH 1;"
database.engine.execute(sql)
| true
| true
|
f70173d8b3c5468c29206b1fd97bf8bc365f64be
| 3,104
|
py
|
Python
|
basic/string2.py
|
Ifo0/google-dev-course
|
c96d688ad4d50ec63e02b9edd671334dcd2d2811
|
[
"Apache-2.0"
] | null | null | null |
basic/string2.py
|
Ifo0/google-dev-course
|
c96d688ad4d50ec63e02b9edd671334dcd2d2811
|
[
"Apache-2.0"
] | null | null | null |
basic/string2.py
|
Ifo0/google-dev-course
|
c96d688ad4d50ec63e02b9edd671334dcd2d2811
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(word):
if len(word) >= 3:
if word[-3:] == 'ing':
return word + 'ly'
else:
return word + 'ing'
else:
return word
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    n = s.find('not')
    b = s.find('bad')
    if n != -1 and b > n:
        s = s[:n] + 'good' + s[b + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
if len(a) % 2 == 0:
a_first_half = a[:len(a) // 2]
a_second_half = a[-len(a_first_half):]
if len(a) % 2 != 0:
a_first_half = a[:len(a) // 2 + 1]
a_second_half = a[-(len(a_first_half) - 1):]
if len(b) % 2 == 0:
b_first_half = b[:len(b) // 2]
b_second_half = b[-len(b_first_half):]
if len(b) % 2 != 0:
b_first_half = b[:len(b) // 2 + 1]
b_second_half = b[-(len(b_first_half) - 1):]
return a_first_half + b_first_half + a_second_half + b_second_half
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| 30.431373
| 77
| 0.641108
|
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(word):
if len(word) >= 3:
if word[-3:] == 'ing':
return word + 'ly'
else:
return word + 'ing'
else:
return word
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    n = s.find('not')
    b = s.find('bad')
    if n != -1 and b > n:
        s = s[:n] + 'good' + s[b + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
def front_back(a, b):
if len(a) % 2 == 0:
a_first_half = a[:len(a) // 2]
a_second_half = a[-len(a_first_half):]
if len(a) % 2 != 0:
a_first_half = a[:len(a) // 2 + 1]
a_second_half = a[-(len(a_first_half) - 1):]
if len(b) % 2 == 0:
b_first_half = b[:len(b) // 2]
b_second_half = b[-len(b_first_half):]
if len(b) % 2 != 0:
b_first_half = b[:len(b) // 2 + 1]
b_second_half = b[-(len(b_first_half) - 1):]
return a_first_half + b_first_half + a_second_half + b_second_half
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| false
| true
|
f70173db91482b779289e981d3527cf4fdf8b6bc
| 1,990
|
py
|
Python
|
do_trim.py
|
eric-haibin-lin/text-proc
|
f84e2d6f802302fef5f20a2cb4b7583a57dd15c3
|
[
"Apache-2.0"
] | 9
|
2019-07-16T21:30:01.000Z
|
2022-01-26T02:32:26.000Z
|
do_trim.py
|
eric-haibin-lin/text-proc
|
f84e2d6f802302fef5f20a2cb4b7583a57dd15c3
|
[
"Apache-2.0"
] | null | null | null |
do_trim.py
|
eric-haibin-lin/text-proc
|
f84e2d6f802302fef5f20a2cb4b7583a57dd15c3
|
[
"Apache-2.0"
] | 2
|
2020-02-29T13:21:26.000Z
|
2020-08-19T09:27:29.000Z
|
from multiprocessing import Pool
import argparse
import glob
import os
import io
import time
import logging
import gluonnlp as nlp
import tokenizer as tokenization
parser = argparse.ArgumentParser(description='BERT tokenizer')
parser.add_argument('--input_files', type=str, default='wiki_*.doc',
help='Input files. Default is "wiki_*.doc"')
parser.add_argument('--nworker', type=int, default=8,
help='Number of workers for parallel processing.')
args = parser.parse_args()
input_files = sorted(glob.glob(os.path.expanduser(args.input_files)))
num_files = len(input_files)
num_workers = args.nworker
logging.basicConfig(level=logging.INFO)
logging.info("Number of input files to process = %d"%(num_files))
# TODO(haibin) tokenize with vocab
exclude_patterns = [
'< no ##in ##cl ##ude >\n'
]
def in_pattern(x):
for pattern in exclude_patterns:
if len(x) == len(pattern) and x == pattern:
return True
return False
def f(input_file):
with io.open(input_file, 'r', encoding="utf-8") as fin:
        assert input_file.endswith('.tokens'), 'Expects .tokens suffix for input files'
with io.open(input_file.replace('.tokens', '.tks'), 'w', encoding="utf-8") as fout:
new_doc = True
with io.open(input_file, 'r', encoding="utf-8") as fin:
lines = fin.readlines()
for line in lines:
if new_doc:
new_doc = False
elif len(line) == 1 and line[0] == '\n':
new_doc = True
fout.write(u'\n')
elif in_pattern(line):
pass
else:
fout.write(line)
if __name__ == '__main__':
tic = time.time()
p = Pool(num_workers)
p.map(f, input_files)
toc = time.time()
logging.info("Processed %s in %.2f sec"%(args.input_files, toc-tic))
| 32.622951
| 91
| 0.59799
|
from multiprocessing import Pool
import argparse
import glob
import os
import io
import time
import logging
import gluonnlp as nlp
import tokenizer as tokenization
parser = argparse.ArgumentParser(description='BERT tokenizer')
parser.add_argument('--input_files', type=str, default='wiki_*.doc',
help='Input files. Default is "wiki_*.doc"')
parser.add_argument('--nworker', type=int, default=8,
help='Number of workers for parallel processing.')
args = parser.parse_args()
input_files = sorted(glob.glob(os.path.expanduser(args.input_files)))
num_files = len(input_files)
num_workers = args.nworker
logging.basicConfig(level=logging.INFO)
logging.info("Number of input files to process = %d"%(num_files))
exclude_patterns = [
'< no ##in ##cl ##ude >\n'
]
def in_pattern(x):
for pattern in exclude_patterns:
if len(x) == len(pattern) and x == pattern:
return True
return False
def f(input_file):
with io.open(input_file, 'r', encoding="utf-8") as fin:
        assert input_file.endswith('.tokens'), 'Expects .tokens suffix for input files'
with io.open(input_file.replace('.tokens', '.tks'), 'w', encoding="utf-8") as fout:
new_doc = True
with io.open(input_file, 'r', encoding="utf-8") as fin:
lines = fin.readlines()
for line in lines:
if new_doc:
new_doc = False
elif len(line) == 1 and line[0] == '\n':
new_doc = True
fout.write(u'\n')
elif in_pattern(line):
pass
else:
fout.write(line)
if __name__ == '__main__':
tic = time.time()
p = Pool(num_workers)
p.map(f, input_files)
toc = time.time()
logging.info("Processed %s in %.2f sec"%(args.input_files, toc-tic))
| true
| true
|
f701749353fbb9936ba4dc99198030f7ba70dc97
| 6,952
|
py
|
Python
|
_scripts/image_slicer/main.py
|
deniskolokol/soma
|
88cf7d9e19c5c66e98d48798658ac67737b6ff89
|
[
"MIT"
] | null | null | null |
_scripts/image_slicer/main.py
|
deniskolokol/soma
|
88cf7d9e19c5c66e98d48798658ac67737b6ff89
|
[
"MIT"
] | null | null | null |
_scripts/image_slicer/main.py
|
deniskolokol/soma
|
88cf7d9e19c5c66e98d48798658ac67737b6ff89
|
[
"MIT"
] | null | null | null |
'''
Main functionality of ``image_slicer``.
'''
import os
import time
import optparse
from math import sqrt, ceil, floor
from PIL import Image
from helpers import get_basename
class Tile(object):
"""Represents a single tile."""
def __init__(self, image, number, position, coords, filename=None):
self.image = image
self.number = number
self.position = position
self.coords = coords
self.filename = filename
@property
def row(self):
return self.position[0]
@property
def column(self):
return self.position[1]
@property
def basename(self):
"""Strip path and extension. Return base filename."""
return get_basename(self.filename)
def generate_filename(self, directory=os.getcwd(), prefix='tile',
format='png', path=True):
"""Construct and return a filename for this tile."""
filename = prefix + '_{col:02d}_{row:02d}.{ext}'.format(
col=self.column, row=self.row, ext=format)
if not path:
return filename
return os.path.join(directory, filename)
def save(self, filename=None, format='png'):
if not filename:
filename = self.generate_filename(format=format)
self.image.save(filename, format)
self.filename = filename
def __repr__(self):
"""Show tile number, and if saved to disk, filename."""
if self.filename:
return '<Tile #{} - {}>'.format(self.number,
os.path.basename(self.filename))
return '<Tile #{}>'.format(self.number)
def calc_columns_rows(n):
"""
Calculate the number of columns and rows required to divide an image
into ``n`` parts.
Return a tuple of integers in the format (num_columns, num_rows)
"""
num_columns = int(ceil(sqrt(n)))
num_rows = int(ceil(n / float(num_columns)))
return (num_columns, num_rows)
# return (5, 7)
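# e.g. calc_columns_rows(5) -> (3, 2) and calc_columns_rows(35) -> (6, 6)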
def get_combined_size(tiles):
"""Calculate combined size of tiles."""
# TODO: Refactor calculating layout to avoid repetition.
columns, rows = calc_columns_rows(len(tiles))
tile_size = tiles[0].image.size
return (tile_size[0] * columns, tile_size[1] * rows)
def join(tiles):
"""
@param ``tiles`` - Tuple of ``Image`` instances.
@return ``Image`` instance.
"""
im = Image.new('RGB', get_combined_size(tiles), None)
columns, rows = calc_columns_rows(len(tiles))
for tile in tiles:
im.paste(tile.image, tile.coords)
return im
def validate_image(image, number_tiles):
"""Basic sanity checks prior to performing a split."""
TILE_LIMIT = 99 * 99
try:
number_tiles = int(number_tiles)
    except (TypeError, ValueError):
raise ValueError('number_tiles could not be cast to integer.')
if number_tiles > TILE_LIMIT or number_tiles < 2:
raise ValueError('Number of tiles must be between 2 and {} (you \
asked for {}).'.format(TILE_LIMIT, number_tiles))
def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png'):
"""
Write image files to disk. Create specified folder(s) if they
    don't exist. Return a tuple of :class:`Tile` instances.
    Args:
       tiles (list): List, tuple or set of :class:`Tile` objects to save.
       prefix (str): Filename prefix of saved tiles.
    Kwargs:
       directory (str): Directory to save tiles. Created if non-existent.
Returns:
Tuple of :class:`Tile` instances.
"""
# Causes problems in CLI script.
# if not os.path.exists(directory):
# os.makedirs(directory)
for tile in tiles:
tile.save(filename=tile.generate_filename(prefix=prefix,
directory=directory,
format=format))
return tuple(tiles)
def _do_slice(filename, output, **kwargs):
"""
    Split an image into a grid of tiles.
    Args:
       filename (str): The filename of the image to split.
       output (str): Directory where the tiles are saved.
    Kwargs:
       columns (int): Number of grid columns (default 5).
       rows (int): Number of grid rows (default 7).
       dry_run (bool): If True, report only; do not save tiles to disk.
Returns:
Tuple of :class:`Tile` instances.
"""
im = Image.open(filename)
# validate_image(im, number_tiles)
im_w, im_h = im.size
# columns, rows = calc_columns_rows(number_tiles)
columns = int(kwargs.get('columns', 5))
rows = int(kwargs.get('rows', 7))
number_tiles = (columns * rows) - 1
extras = (columns * rows) - number_tiles
tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))
tiles = []
number = 1
for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.
for pos_x in range(0, im_w - columns, tile_w): # as above.
area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
image = im.crop(area)
position = (int(floor(pos_x / tile_w)) + 1,
int(floor(pos_y / tile_h)) + 1)
coords = (pos_x, pos_y)
tile = Tile(image, number, position, coords)
tiles.append(tile)
number += 1
if not kwargs['dry_run']:
save_tiles(tiles,
prefix=get_basename(filename),
directory=output)
return tuple(tiles)
def main(path, **kwargs):
if os.path.isdir(path):
fnames = [os.path.join(path, f) for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f)) and f.endswith(".jpg")]
output = os.path.join(os.path.abspath(path), 'output')
else:
fnames = [path]
output = os.path.join(os.path.dirname(os.path.abspath(path)), 'output')
if not os.path.exists(output):
os.makedirs(output)
for filename in fnames:
tiles = _do_slice(filename, output, **kwargs)
print "In %s: saved %d tiles for file %s" % (output, len(tiles), filename)
if __name__ == '__main__':
parser = optparse.OptionParser(usage="usage: python %prog [OPTIONS] filename")
parser.add_option("-c", "--columns",
action="store",
dest="columns",
help="Number of columns")
parser.add_option("-r", "--rows",
action="store",
dest="rows",
default=0,
help="Number of rows")
parser.add_option('-d', "--dry",
action='store_true',
dest='dry_run',
default=False,
help='Dry run (do not actually perform anything, only report).')
opts, args = parser.parse_args()
start_time = time.time()
try:
main(args[0], **vars(opts))
except IndexError:
print "You must specify source filename!"
exit()
print "Done, took %d seconds" % int(time.time() - start_time)
| 32.485981
| 86
| 0.584292
|
'''
Main functionality of ``image_slicer``.
'''
import os
import time
import optparse
from math import sqrt, ceil, floor
from PIL import Image
from helpers import get_basename
class Tile(object):
"""Represents a single tile."""
def __init__(self, image, number, position, coords, filename=None):
self.image = image
self.number = number
self.position = position
self.coords = coords
self.filename = filename
@property
def row(self):
return self.position[0]
@property
def column(self):
return self.position[1]
@property
def basename(self):
"""Strip path and extension. Return base filename."""
return get_basename(self.filename)
def generate_filename(self, directory=os.getcwd(), prefix='tile',
format='png', path=True):
"""Construct and return a filename for this tile."""
filename = prefix + '_{col:02d}_{row:02d}.{ext}'.format(
col=self.column, row=self.row, ext=format)
if not path:
return filename
return os.path.join(directory, filename)
def save(self, filename=None, format='png'):
if not filename:
filename = self.generate_filename(format=format)
self.image.save(filename, format)
self.filename = filename
def __repr__(self):
"""Show tile number, and if saved to disk, filename."""
if self.filename:
return '<Tile #{} - {}>'.format(self.number,
os.path.basename(self.filename))
return '<Tile #{}>'.format(self.number)
def calc_columns_rows(n):
"""
Calculate the number of columns and rows required to divide an image
into ``n`` parts.
Return a tuple of integers in the format (num_columns, num_rows)
"""
num_columns = int(ceil(sqrt(n)))
num_rows = int(ceil(n / float(num_columns)))
return (num_columns, num_rows)
def get_combined_size(tiles):
"""Calculate combined size of tiles."""
columns, rows = calc_columns_rows(len(tiles))
tile_size = tiles[0].image.size
return (tile_size[0] * columns, tile_size[1] * rows)
def join(tiles):
"""
@param ``tiles`` - Tuple of ``Image`` instances.
@return ``Image`` instance.
"""
im = Image.new('RGB', get_combined_size(tiles), None)
columns, rows = calc_columns_rows(len(tiles))
for tile in tiles:
im.paste(tile.image, tile.coords)
return im
def validate_image(image, number_tiles):
"""Basic sanity checks prior to performing a split."""
TILE_LIMIT = 99 * 99
try:
number_tiles = int(number_tiles)
    except (TypeError, ValueError):
raise ValueError('number_tiles could not be cast to integer.')
if number_tiles > TILE_LIMIT or number_tiles < 2:
raise ValueError('Number of tiles must be between 2 and {} (you \
asked for {}).'.format(TILE_LIMIT, number_tiles))
def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png'):
"""
Write image files to disk. Create specified folder(s) if they
    don't exist. Return a tuple of :class:`Tile` instances.
    Args:
       tiles (list): List, tuple or set of :class:`Tile` objects to save.
       prefix (str): Filename prefix of saved tiles.
    Kwargs:
       directory (str): Directory to save tiles. Created if non-existent.
Returns:
Tuple of :class:`Tile` instances.
"""
# Causes problems in CLI script.
# if not os.path.exists(directory):
# os.makedirs(directory)
for tile in tiles:
tile.save(filename=tile.generate_filename(prefix=prefix,
directory=directory,
format=format))
return tuple(tiles)
def _do_slice(filename, output, **kwargs):
"""
    Split an image into a grid of tiles.
    Args:
       filename (str): The filename of the image to split.
       output (str): Directory where the tiles are saved.
    Kwargs:
       columns (int): Number of grid columns (default 5).
       rows (int): Number of grid rows (default 7).
       dry_run (bool): If True, report only; do not save tiles to disk.
Returns:
Tuple of :class:`Tile` instances.
"""
im = Image.open(filename)
# validate_image(im, number_tiles)
im_w, im_h = im.size
# columns, rows = calc_columns_rows(number_tiles)
columns = int(kwargs.get('columns', 5))
rows = int(kwargs.get('rows', 7))
number_tiles = (columns * rows) - 1
extras = (columns * rows) - number_tiles
tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))
tiles = []
number = 1
for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.
for pos_x in range(0, im_w - columns, tile_w): # as above.
area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
image = im.crop(area)
position = (int(floor(pos_x / tile_w)) + 1,
int(floor(pos_y / tile_h)) + 1)
coords = (pos_x, pos_y)
tile = Tile(image, number, position, coords)
tiles.append(tile)
number += 1
if not kwargs['dry_run']:
save_tiles(tiles,
prefix=get_basename(filename),
directory=output)
return tuple(tiles)
def main(path, **kwargs):
if os.path.isdir(path):
fnames = [os.path.join(path, f) for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f)) and f.endswith(".jpg")]
output = os.path.join(os.path.abspath(path), 'output')
else:
fnames = [path]
output = os.path.join(os.path.dirname(os.path.abspath(path)), 'output')
if not os.path.exists(output):
os.makedirs(output)
for filename in fnames:
tiles = _do_slice(filename, output, **kwargs)
print "In %s: saved %d tiles for file %s" % (output, len(tiles), filename)
if __name__ == '__main__':
parser = optparse.OptionParser(usage="usage: python %prog [OPTIONS] filename")
parser.add_option("-c", "--columns",
action="store",
dest="columns",
help="Number of columns")
parser.add_option("-r", "--rows",
action="store",
dest="rows",
default=0,
help="Number of rows")
parser.add_option('-d', "--dry",
action='store_true',
dest='dry_run',
default=False,
help='Dry run (do not actually perform anything, only report).')
opts, args = parser.parse_args()
start_time = time.time()
try:
main(args[0], **vars(opts))
except IndexError:
print "You must specify source filename!"
exit()
print "Done, took %d seconds" % int(time.time() - start_time)
| false
| true
|
f7017509f204ca09eefcaa0814db20be4363c56d
| 445
|
py
|
Python
|
d9b.py
|
jogloran/advent-of-code-2020
|
9804f1eb8d94c991d9aa3348f01f4bf65c195849
|
[
"MIT"
] | null | null | null |
d9b.py
|
jogloran/advent-of-code-2020
|
9804f1eb8d94c991d9aa3348f01f4bf65c195849
|
[
"MIT"
] | null | null | null |
d9b.py
|
jogloran/advent-of-code-2020
|
9804f1eb8d94c991d9aa3348f01f4bf65c195849
|
[
"MIT"
] | null | null | null |
import sys
orig_data = list(map(int, open('d9.txt')))
data = orig_data[:]
target = 32321523
for i, e in enumerate(data):
if i == 0: continue
data[i] = data[i - 1] + data[i]
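# data[i] now holds the running sum orig_data[0] + ... + orig_data[i], so
# data[i] - data[j] equals the sum of the contiguous slice orig_data[j+1:i+1].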
for i in range(len(data)):
for j in range(i):
if data[i] - data[j] == target:
            print(j + 1, i, 'inclusive')
            print(min(orig_data[j + 1:i + 1]) + max(orig_data[j + 1:i + 1]))
sys.exit()
| 31.785714
| 64
| 0.58427
|
import sys
orig_data = list(map(int, open('d9.txt')))
data = orig_data[:]
target = 32321523
for i, e in enumerate(data):
if i == 0: continue
data[i] = data[i - 1] + data[i]
for i in range(len(data)):
for j in range(i):
if data[i] - data[j] == target:
            print(j + 1, i, 'inclusive')
            print(min(orig_data[j + 1:i + 1]) + max(orig_data[j + 1:i + 1]))
sys.exit()
| true
| true
|
f70175903618b46cbc2465f247c1e2539656a5a4
| 6,355
|
py
|
Python
|
zhaquirks/xiaomi/aqara/remote_b186acn01.py
|
sylvaing/zha-device-handlers
|
61dedab318a8986b2b0a7bffe5116bbc40bed214
|
[
"Apache-2.0"
] | null | null | null |
zhaquirks/xiaomi/aqara/remote_b186acn01.py
|
sylvaing/zha-device-handlers
|
61dedab318a8986b2b0a7bffe5116bbc40bed214
|
[
"Apache-2.0"
] | null | null | null |
zhaquirks/xiaomi/aqara/remote_b186acn01.py
|
sylvaing/zha-device-handlers
|
61dedab318a8986b2b0a7bffe5116bbc40bed214
|
[
"Apache-2.0"
] | null | null | null |
"""Xiaomi aqara single key switch device."""
import logging
from zigpy.profiles import zha
from zigpy.zcl.clusters.general import (
AnalogInput,
Basic,
Groups,
Identify,
MultistateInput,
OnOff,
Ota,
Scenes,
)
from .. import (
LUMI,
XIAOMI_NODE_DESC,
BasicCluster,
XiaomiPowerConfiguration,
XiaomiQuickInitDevice,
)
from ... import CustomCluster
from ...const import (
ATTR_ID,
COMMAND,
DEVICE_TYPE,
DOUBLE_PRESS,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
MODELS_INFO,
NODE_DESCRIPTOR,
OUTPUT_CLUSTERS,
PRESS_TYPE,
PROFILE_ID,
SHORT_PRESS,
SKIP_CONFIGURATION,
VALUE,
ZHA_SEND_EVENT,
)
DOUBLE = "double"
HOLD = "long press"
PRESS_TYPES = {0: "long press", 1: "single", 2: "double"}
SINGLE = "single"
STATUS_TYPE_ATTR = 0x0055 # decimal = 85
XIAOMI_CLUSTER_ID = 0xFFFF
XIAOMI_DEVICE_TYPE = 0x5F01
XIAOMI_DEVICE_TYPE2 = 0x5F02
XIAOMI_DEVICE_TYPE3 = 0x5F03
_LOGGER = logging.getLogger(__name__)
class RemoteB186ACN01(XiaomiQuickInitDevice):
"""Aqara single key switch device."""
class MultistateInputCluster(CustomCluster, MultistateInput):
"""Multistate input cluster."""
cluster_id = MultistateInput.cluster_id
def __init__(self, *args, **kwargs):
"""Init."""
self._current_state = None
super().__init__(*args, **kwargs)
def _update_attribute(self, attrid, value):
super()._update_attribute(attrid, value)
if attrid == STATUS_TYPE_ATTR:
self._current_state = PRESS_TYPES.get(value)
event_args = {
PRESS_TYPE: self._current_state,
ATTR_ID: attrid,
VALUE: value,
}
self.listener_event(ZHA_SEND_EVENT, self._current_state, event_args)
# show something in the sensor in HA
super()._update_attribute(0, self._current_state)
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=24321
# device_version=1
# input_clusters=[0, 3, 25, 65535, 18]
# output_clusters=[0, 4, 3, 5, 25, 65535, 18]>
MODELS_INFO: [
(LUMI, "lumi.remote.b186acn01"),
(LUMI, "lumi.remote.b186acn02"),
(LUMI, "lumi.sensor_86sw1"),
],
NODE_DESCRIPTOR: XIAOMI_NODE_DESC,
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: XIAOMI_DEVICE_TYPE,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Ota.cluster_id,
XIAOMI_CLUSTER_ID,
MultistateInputCluster.cluster_id,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
XIAOMI_CLUSTER_ID,
MultistateInputCluster.cluster_id,
],
},
# <SimpleDescriptor endpoint=2 profile=260 device_type=24322
# device_version=1
# input_clusters=[3, 18]
# output_clusters=[4, 3, 5, 18]>
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: XIAOMI_DEVICE_TYPE2,
INPUT_CLUSTERS: [
Identify.cluster_id,
MultistateInputCluster.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
MultistateInputCluster.cluster_id,
],
},
# <SimpleDescriptor endpoint=3 profile=260 device_type=24323
# device_version=1
# input_clusters=[3, 12]
# output_clusters=[4, 3, 5, 12]>
3: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: XIAOMI_DEVICE_TYPE3,
INPUT_CLUSTERS: [Identify.cluster_id, AnalogInput.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
AnalogInput.cluster_id,
],
},
},
}
replacement = {
SKIP_CONFIGURATION: True,
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [
BasicCluster,
XiaomiPowerConfiguration,
Identify.cluster_id,
Ota.cluster_id,
XIAOMI_CLUSTER_ID,
MultistateInputCluster,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
XIAOMI_CLUSTER_ID,
MultistateInputCluster,
OnOff.cluster_id,
],
},
2: {
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [Identify.cluster_id, MultistateInputCluster],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
MultistateInputCluster,
],
},
3: {
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [Identify.cluster_id, MultistateInputCluster],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
AnalogInput.cluster_id,
MultistateInputCluster,
],
},
},
}
device_automation_triggers = {
(DOUBLE_PRESS, DOUBLE_PRESS): {COMMAND: DOUBLE},
(SHORT_PRESS, SHORT_PRESS): {COMMAND: SINGLE},
(LONG_PRESS, LONG_PRESS): {COMMAND: HOLD},
}
| 31.305419
| 84
| 0.51786
|
import logging
from zigpy.profiles import zha
from zigpy.zcl.clusters.general import (
AnalogInput,
Basic,
Groups,
Identify,
MultistateInput,
OnOff,
Ota,
Scenes,
)
from .. import (
LUMI,
XIAOMI_NODE_DESC,
BasicCluster,
XiaomiPowerConfiguration,
XiaomiQuickInitDevice,
)
from ... import CustomCluster
from ...const import (
ATTR_ID,
COMMAND,
DEVICE_TYPE,
DOUBLE_PRESS,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
MODELS_INFO,
NODE_DESCRIPTOR,
OUTPUT_CLUSTERS,
PRESS_TYPE,
PROFILE_ID,
SHORT_PRESS,
SKIP_CONFIGURATION,
VALUE,
ZHA_SEND_EVENT,
)
DOUBLE = "double"
HOLD = "long press"
PRESS_TYPES = {0: "long press", 1: "single", 2: "double"}
SINGLE = "single"
STATUS_TYPE_ATTR = 0x0055
XIAOMI_CLUSTER_ID = 0xFFFF
XIAOMI_DEVICE_TYPE = 0x5F01
XIAOMI_DEVICE_TYPE2 = 0x5F02
XIAOMI_DEVICE_TYPE3 = 0x5F03
_LOGGER = logging.getLogger(__name__)
class RemoteB186ACN01(XiaomiQuickInitDevice):
class MultistateInputCluster(CustomCluster, MultistateInput):
cluster_id = MultistateInput.cluster_id
def __init__(self, *args, **kwargs):
self._current_state = None
super().__init__(*args, **kwargs)
def _update_attribute(self, attrid, value):
super()._update_attribute(attrid, value)
if attrid == STATUS_TYPE_ATTR:
self._current_state = PRESS_TYPES.get(value)
event_args = {
PRESS_TYPE: self._current_state,
ATTR_ID: attrid,
VALUE: value,
}
self.listener_event(ZHA_SEND_EVENT, self._current_state, event_args)
super()._update_attribute(0, self._current_state)
signature = {
MODELS_INFO: [
(LUMI, "lumi.remote.b186acn01"),
(LUMI, "lumi.remote.b186acn02"),
(LUMI, "lumi.sensor_86sw1"),
],
NODE_DESCRIPTOR: XIAOMI_NODE_DESC,
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: XIAOMI_DEVICE_TYPE,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Ota.cluster_id,
XIAOMI_CLUSTER_ID,
MultistateInputCluster.cluster_id,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
XIAOMI_CLUSTER_ID,
MultistateInputCluster.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: XIAOMI_DEVICE_TYPE2,
INPUT_CLUSTERS: [
Identify.cluster_id,
MultistateInputCluster.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
MultistateInputCluster.cluster_id,
],
},
3: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: XIAOMI_DEVICE_TYPE3,
INPUT_CLUSTERS: [Identify.cluster_id, AnalogInput.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
AnalogInput.cluster_id,
],
},
},
}
replacement = {
SKIP_CONFIGURATION: True,
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [
BasicCluster,
XiaomiPowerConfiguration,
Identify.cluster_id,
Ota.cluster_id,
XIAOMI_CLUSTER_ID,
MultistateInputCluster,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
XIAOMI_CLUSTER_ID,
MultistateInputCluster,
OnOff.cluster_id,
],
},
2: {
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [Identify.cluster_id, MultistateInputCluster],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
MultistateInputCluster,
],
},
3: {
DEVICE_TYPE: zha.DeviceType.REMOTE_CONTROL,
INPUT_CLUSTERS: [Identify.cluster_id, MultistateInputCluster],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
AnalogInput.cluster_id,
MultistateInputCluster,
],
},
},
}
device_automation_triggers = {
(DOUBLE_PRESS, DOUBLE_PRESS): {COMMAND: DOUBLE},
(SHORT_PRESS, SHORT_PRESS): {COMMAND: SINGLE},
(LONG_PRESS, LONG_PRESS): {COMMAND: HOLD},
}
| true
| true
|
f701763c784e49214fd9a7e52a90d440b81882b8
| 28,284
|
py
|
Python
|
vscode/extensions/ms-python.python-2022.4.1/pythonFiles/lib/python/debugpy/adapter/clients.py
|
despresj/dotfiles
|
1c76fc4efcc125f74aa5ebdc4aa521277a360379
|
[
"MIT"
] | null | null | null |
vscode/extensions/ms-python.python-2022.4.1/pythonFiles/lib/python/debugpy/adapter/clients.py
|
despresj/dotfiles
|
1c76fc4efcc125f74aa5ebdc4aa521277a360379
|
[
"MIT"
] | null | null | null |
vscode/extensions/ms-python.python-2022.4.1/pythonFiles/lib/python/debugpy/adapter/clients.py
|
despresj/dotfiles
|
1c76fc4efcc125f74aa5ebdc4aa521277a360379
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import os
import sys
import debugpy
from debugpy import adapter, common, launcher
from debugpy.common import compat, fmt, json, log, messaging, sockets
from debugpy.common.compat import unicode
from debugpy.adapter import components, servers, sessions
class Client(components.Component):
"""Handles the client side of a debug session."""
message_handler = components.Component.message_handler
class Capabilities(components.Capabilities):
PROPERTIES = {
"supportsVariableType": False,
"supportsVariablePaging": False,
"supportsRunInTerminalRequest": False,
"supportsMemoryReferences": False,
}
class Expectations(components.Capabilities):
PROPERTIES = {
"locale": "en-US",
"linesStartAt1": True,
"columnsStartAt1": True,
"pathFormat": json.enum("path", optional=True), # we don't support "uri"
}
def __init__(self, sock):
if sock == "stdio":
log.info("Connecting to client over stdio...", self)
stream = messaging.JsonIOStream.from_stdio()
# Make sure that nothing else tries to interfere with the stdio streams
# that are going to be used for DAP communication from now on.
sys.stdin = stdin = open(os.devnull, "r")
atexit.register(stdin.close)
sys.stdout = stdout = open(os.devnull, "w")
atexit.register(stdout.close)
else:
stream = messaging.JsonIOStream.from_socket(sock)
with sessions.Session() as session:
super(Client, self).__init__(session, stream)
self.client_id = None
"""ID of the connecting client. This can be 'test' while running tests."""
self.has_started = False
"""Whether the "launch" or "attach" request was received from the client, and
fully handled.
"""
self.start_request = None
"""The "launch" or "attach" request as received from the client.
"""
self._initialize_request = None
"""The "initialize" request as received from the client, to propagate to the
server later."""
self._deferred_events = []
"""Deferred events from the launcher and the server that must be propagated
only if and when the "launch" or "attach" response is sent.
"""
self._known_subprocesses = set()
"""servers.Connection instances for subprocesses that this client has been
made aware of.
"""
session.client = self
session.register()
# For the transition period, send the telemetry events with both old and new
# name. The old one should be removed once the new one lights up.
self.channel.send_event(
"output",
{
"category": "telemetry",
"output": "ptvsd",
"data": {"packageVersion": debugpy.__version__},
},
)
self.channel.send_event(
"output",
{
"category": "telemetry",
"output": "debugpy",
"data": {"packageVersion": debugpy.__version__},
},
)
def propagate_after_start(self, event):
# pydevd starts sending events as soon as we connect, but the client doesn't
# expect to see any until it receives the response to "launch" or "attach"
# request. If client is not ready yet, save the event instead of propagating
# it immediately.
if self._deferred_events is not None:
self._deferred_events.append(event)
log.debug("Propagation deferred.")
else:
self.client.channel.propagate(event)
def _propagate_deferred_events(self):
log.debug("Propagating deferred events to {0}...", self.client)
for event in self._deferred_events:
log.debug("Propagating deferred {0}", event.describe())
self.client.channel.propagate(event)
log.info("All deferred events propagated to {0}.", self.client)
self._deferred_events = None
# Generic event handler. There are no specific handlers for client events, because
# there are no events from the client in DAP - but we propagate them if we can, in
# case some events appear in future protocol versions.
@message_handler
def event(self, event):
if self.server:
self.server.channel.propagate(event)
# Generic request handler, used if there's no specific handler below.
@message_handler
def request(self, request):
return self.server.channel.delegate(request)
@message_handler
def initialize_request(self, request):
if self._initialize_request is not None:
raise request.isnt_valid("Session is already initialized")
self.client_id = request("clientID", "")
self.capabilities = self.Capabilities(self, request)
self.expectations = self.Expectations(self, request)
self._initialize_request = request
exception_breakpoint_filters = [
{
"filter": "raised",
"label": "Raised Exceptions",
"default": False,
"description": "Break whenever any exception is raised.",
},
{
"filter": "uncaught",
"label": "Uncaught Exceptions",
"default": True,
"description": "Break when the process is exiting due to unhandled exception.",
},
{
"filter": "userUnhandled",
"label": "User Uncaught Exceptions",
"default": False,
"description": "Break when exception escapes into library code.",
},
]
return {
"supportsCompletionsRequest": True,
"supportsConditionalBreakpoints": True,
"supportsConfigurationDoneRequest": True,
"supportsDebuggerProperties": True,
"supportsDelayedStackTraceLoading": True,
"supportsEvaluateForHovers": True,
"supportsExceptionInfoRequest": True,
"supportsExceptionOptions": True,
"supportsFunctionBreakpoints": True,
"supportsHitConditionalBreakpoints": True,
"supportsLogPoints": True,
"supportsModulesRequest": True,
"supportsSetExpression": True,
"supportsSetVariable": True,
"supportsValueFormattingOptions": True,
"supportsTerminateDebuggee": True,
"supportsGotoTargetsRequest": True,
"supportsClipboardContext": True,
"exceptionBreakpointFilters": exception_breakpoint_filters,
"supportsStepInTargetsRequest": True,
}
# Common code for "launch" and "attach" request handlers.
#
# See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522
# for the sequence of request and events necessary to orchestrate the start.
def _start_message_handler(f):
@components.Component.message_handler
def handle(self, request):
assert request.is_request("launch", "attach")
if self._initialize_request is None:
raise request.isnt_valid("Session is not initialized yet")
if self.launcher or self.server:
raise request.isnt_valid("Session is already started")
self.session.no_debug = request("noDebug", json.default(False))
if self.session.no_debug:
servers.dont_wait_for_first_connection()
self.session.debug_options = debug_options = set(
request("debugOptions", json.array(unicode))
)
f(self, request)
if request.response is not None:
return
if self.server:
self.server.initialize(self._initialize_request)
self._initialize_request = None
arguments = request.arguments
if self.launcher:
redirecting = arguments.get("console") == "internalConsole"
if "RedirectOutput" in debug_options:
# The launcher is doing output redirection, so we don't need the
# server to do it, as well.
arguments = dict(arguments)
arguments["debugOptions"] = list(
debug_options - {"RedirectOutput"}
)
redirecting = True
if arguments.get("redirectOutput"):
arguments = dict(arguments)
del arguments["redirectOutput"]
redirecting = True
arguments["isOutputRedirected"] = redirecting
# pydevd doesn't send "initialized", and responds to the start request
# immediately, without waiting for "configurationDone". If it changes
# to conform to the DAP spec, we'll need to defer waiting for response.
try:
self.server.channel.request(request.command, arguments)
except messaging.NoMoreMessages:
# Server closed connection before we could receive the response to
# "attach" or "launch" - this can happen when debuggee exits shortly
# after starting. It's not an error, but we can't do anything useful
# here at this point, either, so just bail out.
request.respond({})
self.session.finalize(
fmt(
"{0} disconnected before responding to {1!j}",
self.server,
request.command,
)
)
return
except messaging.MessageHandlingError as exc:
exc.propagate(request)
if self.session.no_debug:
self.start_request = request
self.has_started = True
request.respond({})
self._propagate_deferred_events()
return
if "clientOS" in request:
client_os = request("clientOS", json.enum("windows", "unix")).upper()
elif {"WindowsClient", "Windows"} & debug_options:
client_os = "WINDOWS"
elif {"UnixClient", "UNIX"} & debug_options:
client_os = "UNIX"
else:
client_os = "WINDOWS" if sys.platform == "win32" else "UNIX"
self.server.channel.request(
"setDebuggerProperty",
{
"skipSuspendOnBreakpointException": ("BaseException",),
"skipPrintBreakpointException": ("NameError",),
"multiThreadsSingleNotification": True,
"ideOS": client_os,
},
)
# Let the client know that it can begin configuring the adapter.
self.channel.send_event("initialized")
self.start_request = request
return messaging.NO_RESPONSE # will respond on "configurationDone"
return handle
@_start_message_handler
def launch_request(self, request):
from debugpy.adapter import launchers
if self.session.id != 1 or len(servers.connections()):
raise request.cant_handle('"attach" expected')
debug_options = set(request("debugOptions", json.array(unicode)))
# Handling of properties that can also be specified as legacy "debugOptions" flags.
# If property is explicitly set to false, but the flag is in "debugOptions", treat
# it as an error. Returns None if the property wasn't explicitly set either way.
def property_or_debug_option(prop_name, flag_name):
assert prop_name[0].islower() and flag_name[0].isupper()
value = request(prop_name, bool, optional=True)
if value == ():
value = None
if flag_name in debug_options:
if value is False:
raise request.isnt_valid(
'{0!j}:false and "debugOptions":[{1!j}] are mutually exclusive',
prop_name,
flag_name,
)
value = True
return value
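        # e.g. property_or_debug_option("django", "Django") is True when the
        # request has "django": true or "debugOptions" contains "Django".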
# "pythonPath" is a deprecated legacy spelling. If "python" is missing, then try
# the alternative. But if both are missing, the error message should say "python".
python_key = "python"
if python_key in request:
if "pythonPath" in request:
raise request.isnt_valid(
'"pythonPath" is not valid if "python" is specified'
)
elif "pythonPath" in request:
python_key = "pythonPath"
python = request(python_key, json.array(unicode, vectorize=True, size=(0,)))
if not len(python):
python = [compat.filename(sys.executable)]
python += request("pythonArgs", json.array(unicode, size=(0,)))
request.arguments["pythonArgs"] = python[1:]
request.arguments["python"] = python
launcher_python = request("debugLauncherPython", unicode, optional=True)
if launcher_python == ():
launcher_python = python[0]
program = module = code = ()
if "program" in request:
program = request("program", unicode)
args = [program]
request.arguments["processName"] = program
if "module" in request:
module = request("module", unicode)
args = ["-m", module]
request.arguments["processName"] = module
if "code" in request:
code = request("code", json.array(unicode, vectorize=True, size=(1,)))
args = ["-c", "\n".join(code)]
request.arguments["processName"] = "-c"
num_targets = len([x for x in (program, module, code) if x != ()])
if num_targets == 0:
raise request.isnt_valid(
'either "program", "module", or "code" must be specified'
)
elif num_targets != 1:
raise request.isnt_valid(
'"program", "module", and "code" are mutually exclusive'
)
# Propagate "args" via CLI if and only if shell expansion is requested.
args_expansion = request(
"argsExpansion", json.enum("shell", "none", optional=True)
)
if args_expansion == "shell":
args += request("args", json.array(unicode))
request.arguments.pop("args", None)
cwd = request("cwd", unicode, optional=True)
if cwd == ():
# If it's not specified, but we're launching a file rather than a module,
# and the specified path has a directory in it, use that.
cwd = None if program == () else (os.path.dirname(program) or None)
console = request(
"console",
json.enum(
"internalConsole",
"integratedTerminal",
"externalTerminal",
optional=True,
),
)
console_title = request("consoleTitle", json.default("Python Debug Console"))
sudo = bool(property_or_debug_option("sudo", "Sudo"))
if sudo and sys.platform == "win32":
raise request.cant_handle('"sudo":true is not supported on Windows.')
launcher_path = request("debugLauncherPath", os.path.dirname(launcher.__file__))
adapter_host = request("debugAdapterHost", "127.0.0.1")
try:
servers.serve(adapter_host)
except Exception as exc:
raise request.cant_handle(
"{0} couldn't create listener socket for servers: {1}",
self.session,
exc,
)
launchers.spawn_debuggee(
self.session,
request,
[launcher_python],
launcher_path,
adapter_host,
args,
cwd,
console,
console_title,
sudo,
)
@_start_message_handler
def attach_request(self, request):
if self.session.no_debug:
raise request.isnt_valid('"noDebug" is not supported for "attach"')
host = request("host", unicode, optional=True)
port = request("port", int, optional=True)
listen = request("listen", dict, optional=True)
connect = request("connect", dict, optional=True)
pid = request("processId", (int, unicode), optional=True)
sub_pid = request("subProcessId", int, optional=True)
if host != () or port != ():
if listen != ():
raise request.isnt_valid(
'"listen" and "host"/"port" are mutually exclusive'
)
if connect != ():
raise request.isnt_valid(
'"connect" and "host"/"port" are mutually exclusive'
)
if listen != ():
if connect != ():
raise request.isnt_valid(
'"listen" and "connect" are mutually exclusive'
)
if pid != ():
raise request.isnt_valid(
'"listen" and "processId" are mutually exclusive'
)
if sub_pid != ():
raise request.isnt_valid(
'"listen" and "subProcessId" are mutually exclusive'
)
if pid != () and sub_pid != ():
raise request.isnt_valid(
'"processId" and "subProcessId" are mutually exclusive'
)
if listen != ():
host = listen("host", "127.0.0.1")
port = listen("port", int)
adapter.access_token = None
host, port = servers.serve(host, port)
else:
host, port = servers.serve()
# There are four distinct possibilities here.
#
# If "processId" is specified, this is attach-by-PID. We need to inject the
# debug server into the designated process, and then wait until it connects
# back to us. Since the injected server can crash, there must be a timeout.
#
# If "subProcessId" is specified, this is attach to a known subprocess, likely
# in response to a "debugpyAttach" event. If so, the debug server should be
# connected already, and thus the wait timeout is zero.
#
# If "listen" is specified, this is attach-by-socket with the server expected
# to connect to the adapter via debugpy.connect(). There is no PID known in
# advance, so just wait until the first server connection indefinitely, with
# no timeout.
#
# If "connect" is specified, this is attach-by-socket in which the server has
# spawned the adapter via debugpy.listen(). There is no PID known to the client
# in advance, but the server connection should be either be there already, or
# the server should be connecting shortly, so there must be a timeout.
#
# In the last two cases, if there's more than one server connection already,
# this is a multiprocess re-attach. The client doesn't know the PID, so we just
# connect it to the oldest server connection that we have - in most cases, it
# will be the one for the root debuggee process, but if it has exited already,
# it will be some subprocess.
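# Illustrative request shapes for the four cases above (values are
# hypothetical; a real client fills these in):
#   {"processId": 1234}                                # attach-by-PID
#   {"subProcessId": 5678}                             # known subprocess
#   {"listen": {"host": "127.0.0.1", "port": 5678}}    # wait for server
#   {"connect": {"host": "127.0.0.1", "port": 5678}}   # dial the server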
if pid != ():
if not isinstance(pid, int):
try:
pid = int(pid)
except Exception:
raise request.isnt_valid('"processId" must be parseable as int')
debugpy_args = request("debugpyArgs", json.array(unicode))
servers.inject(pid, debugpy_args)
timeout = common.PROCESS_SPAWN_TIMEOUT
pred = lambda conn: conn.pid == pid
else:
if sub_pid == ():
pred = lambda conn: True
timeout = common.PROCESS_SPAWN_TIMEOUT if listen == () else None
else:
pred = lambda conn: conn.pid == sub_pid
timeout = 0
self.channel.send_event("debugpyWaitingForServer", {"host": host, "port": port})
conn = servers.wait_for_connection(self.session, pred, timeout)
if conn is None:
if sub_pid != ():
# If we can't find a matching subprocess, it's not always an error -
# it might have already exited, or didn't even get a chance to connect.
# To prevent the client from complaining, pretend that the "attach"
# request was successful, but that the session terminated immediately.
request.respond({})
self.session.finalize(
fmt('No known subprocess with "subProcessId":{0}', sub_pid)
)
return
raise request.cant_handle(
(
"Timed out waiting for debug server to connect."
if timeout
else "There is no debug server connected to this adapter."
),
sub_pid,
)
try:
conn.attach_to_session(self.session)
except ValueError:
request.cant_handle("{0} is already being debugged.", conn)
@message_handler
def configurationDone_request(self, request):
if self.start_request is None or self.has_started:
request.cant_handle(
'"configurationDone" is only allowed during handling of a "launch" '
'or an "attach" request'
)
try:
self.has_started = True
try:
result = self.server.channel.delegate(request)
except messaging.NoMoreMessages:
# Server closed connection before we could receive the response to
# "configurationDone" - this can happen when debuggee exits shortly
# after starting. It's not an error, but we can't do anything useful
# here at this point, either, so just bail out.
request.respond({})
self.start_request.respond({})
self.session.finalize(
fmt(
"{0} disconnected before responding to {1!j}",
self.server,
request.command,
)
)
return
else:
request.respond(result)
except messaging.MessageHandlingError as exc:
self.start_request.cant_handle(str(exc))
finally:
if self.start_request.response is None:
self.start_request.respond({})
self._propagate_deferred_events()
# Notify the client of any child processes of the debuggee that aren't already
# being debugged.
for conn in servers.connections():
if conn.server is None and conn.ppid == self.session.pid:
self.notify_of_subprocess(conn)
@message_handler
def evaluate_request(self, request):
propagated_request = self.server.channel.propagate(request)
def handle_response(response):
request.respond(response.body)
propagated_request.on_response(handle_response)
return messaging.NO_RESPONSE
@message_handler
def pause_request(self, request):
request.arguments["threadId"] = "*"
return self.server.channel.delegate(request)
@message_handler
def continue_request(self, request):
request.arguments["threadId"] = "*"
try:
return self.server.channel.delegate(request)
except messaging.NoMoreMessages:
# pydevd can sometimes allow the debuggee to exit before the queued
# "continue" response gets sent. Thus, a failed "continue" response
# indicating that the server disconnected should be treated as success.
return {"allThreadsContinued": True}
@message_handler
def debugpySystemInfo_request(self, request):
result = {"debugpy": {"version": debugpy.__version__}}
if self.server:
try:
pydevd_info = self.server.channel.request("pydevdSystemInfo")
except Exception:
# If the server has already disconnected, or couldn't handle it,
# report what we've got.
pass
else:
result.update(pydevd_info)
return result
@message_handler
def terminate_request(self, request):
self.session.finalize('client requested "terminate"', terminate_debuggee=True)
return {}
@message_handler
def disconnect_request(self, request):
terminate_debuggee = request("terminateDebuggee", bool, optional=True)
if terminate_debuggee == ():
terminate_debuggee = None
self.session.finalize('client requested "disconnect"', terminate_debuggee)
return {}
def notify_of_subprocess(self, conn):
with self.session:
if self.start_request is None or conn in self._known_subprocesses:
return
if "processId" in self.start_request.arguments:
log.warning(
"Not reporting subprocess for {0}, because the parent process "
'was attached to using "processId" rather than "port".',
self.session,
)
return
log.info("Notifying {0} about {1}.", self, conn)
body = dict(self.start_request.arguments)
self._known_subprocesses.add(conn)
for key in "processId", "listen", "preLaunchTask", "postDebugTask":
body.pop(key, None)
body["name"] = fmt("Subprocess {0}", conn.pid)
body["request"] = "attach"
body["subProcessId"] = conn.pid
for key in "args", "processName", "pythonArgs":
body.pop(key, None)
host = body.pop("host", None)
port = body.pop("port", None)
if "connect" not in body:
body["connect"] = {}
if "host" not in body["connect"]:
body["connect"]["host"] = host if host is not None else "127.0.0.1"
if "port" not in body["connect"]:
if port is None:
_, port = listener.getsockname()
body["connect"]["port"] = port
self.channel.send_event("debugpyAttach", body)
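# A resulting "debugpyAttach" event body might look like this (hypothetical
# values, assembled from the start request exactly as shown above):
#   {"name": "Subprocess 1234", "request": "attach", "subProcessId": 1234,
#    "connect": {"host": "127.0.0.1", "port": 5678}, ...}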
def serve(host, port):
global listener
listener = sockets.serve("Client", Client, host, port)
return listener.getsockname()
def stop_serving():
try:
listener.close()
except Exception:
log.swallow_exception(level="warning")
| 41.110465
| 96
| 0.554306
|
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import os
import sys
import debugpy
from debugpy import adapter, common, launcher
from debugpy.common import compat, fmt, json, log, messaging, sockets
from debugpy.common.compat import unicode
from debugpy.adapter import components, servers, sessions
class Client(components.Component):
message_handler = components.Component.message_handler
class Capabilities(components.Capabilities):
PROPERTIES = {
"supportsVariableType": False,
"supportsVariablePaging": False,
"supportsRunInTerminalRequest": False,
"supportsMemoryReferences": False,
}
class Expectations(components.Capabilities):
PROPERTIES = {
"locale": "en-US",
"linesStartAt1": True,
"columnsStartAt1": True,
"pathFormat": json.enum("path", optional=True), }
def __init__(self, sock):
if sock == "stdio":
log.info("Connecting to client over stdio...", self)
stream = messaging.JsonIOStream.from_stdio()
# Make sure that nothing else tries to interfere with the stdio streams
# that are going to be used for DAP communication from now on.
sys.stdin = stdin = open(os.devnull, "r")
atexit.register(stdin.close)
sys.stdout = stdout = open(os.devnull, "w")
atexit.register(stdout.close)
else:
stream = messaging.JsonIOStream.from_socket(sock)
with sessions.Session() as session:
super(Client, self).__init__(session, stream)
self.client_id = None
self.has_started = False
self.start_request = None
self._initialize_request = None
self._deferred_events = []
self._known_subprocesses = set()
session.client = self
session.register()
# For the transition period, send the telemetry events with both old and new
# name. The old one should be removed once the new one lights up.
self.channel.send_event(
"output",
{
"category": "telemetry",
"output": "ptvsd",
"data": {"packageVersion": debugpy.__version__},
},
)
self.channel.send_event(
"output",
{
"category": "telemetry",
"output": "debugpy",
"data": {"packageVersion": debugpy.__version__},
},
)
def propagate_after_start(self, event):
# pydevd starts sending events as soon as we connect, but the client doesn't
# expect any until it receives the response to the "launch" or "attach"
# request, so defer them until then.
if self._deferred_events is not None:
self._deferred_events.append(event)
log.debug("Propagation deferred.")
else:
self.client.channel.propagate(event)
def _propagate_deferred_events(self):
log.debug("Propagating deferred events to {0}...", self.client)
for event in self._deferred_events:
log.debug("Propagating deferred {0}", event.describe())
self.client.channel.propagate(event)
log.info("All deferred events propagated to {0}.", self.client)
self._deferred_events = None
@message_handler
def event(self, event):
if self.server:
self.server.channel.propagate(event)
@message_handler
def request(self, request):
return self.server.channel.delegate(request)
@message_handler
def initialize_request(self, request):
if self._initialize_request is not None:
raise request.isnt_valid("Session is already initialized")
self.client_id = request("clientID", "")
self.capabilities = self.Capabilities(self, request)
self.expectations = self.Expectations(self, request)
self._initialize_request = request
exception_breakpoint_filters = [
{
"filter": "raised",
"label": "Raised Exceptions",
"default": False,
"description": "Break whenever any exception is raised.",
},
{
"filter": "uncaught",
"label": "Uncaught Exceptions",
"default": True,
"description": "Break when the process is exiting due to unhandled exception.",
},
{
"filter": "userUnhandled",
"label": "User Uncaught Exceptions",
"default": False,
"description": "Break when exception escapes into library code.",
},
]
return {
"supportsCompletionsRequest": True,
"supportsConditionalBreakpoints": True,
"supportsConfigurationDoneRequest": True,
"supportsDebuggerProperties": True,
"supportsDelayedStackTraceLoading": True,
"supportsEvaluateForHovers": True,
"supportsExceptionInfoRequest": True,
"supportsExceptionOptions": True,
"supportsFunctionBreakpoints": True,
"supportsHitConditionalBreakpoints": True,
"supportsLogPoints": True,
"supportsModulesRequest": True,
"supportsSetExpression": True,
"supportsSetVariable": True,
"supportsValueFormattingOptions": True,
"supportsTerminateDebuggee": True,
"supportsGotoTargetsRequest": True,
"supportsClipboardContext": True,
"exceptionBreakpointFilters": exception_breakpoint_filters,
"supportsStepInTargetsRequest": True,
}
# Common code for "launch" and "attach" request handlers.
#
# See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522
# for the sequence of requests and events necessary to orchestrate the start.
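# Rough shape of that sequence (a simplified sketch of the linked issue,
# not an exhaustive protocol trace):
#   client  -> adapter: "launch" or "attach" request
#   adapter -> client:  "initialized" event
#   client  -> adapter: breakpoints etc., then "configurationDone"
#   adapter -> client:  response to the original "launch"/"attach"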
def _start_message_handler(f):
@components.Component.message_handler
def handle(self, request):
assert request.is_request("launch", "attach")
if self._initialize_request is None:
raise request.isnt_valid("Session is not initialized yet")
if self.launcher or self.server:
raise request.isnt_valid("Session is already started")
self.session.no_debug = request("noDebug", json.default(False))
if self.session.no_debug:
servers.dont_wait_for_first_connection()
self.session.debug_options = debug_options = set(
request("debugOptions", json.array(unicode))
)
f(self, request)
if request.response is not None:
return
if self.server:
self.server.initialize(self._initialize_request)
self._initialize_request = None
arguments = request.arguments
if self.launcher:
redirecting = arguments.get("console") == "internalConsole"
if "RedirectOutput" in debug_options:
# The launcher is doing output redirection, so we don't need the
# server to do it as well.
arguments = dict(arguments)
arguments["debugOptions"] = list(
debug_options - {"RedirectOutput"}
)
redirecting = True
if arguments.get("redirectOutput"):
arguments = dict(arguments)
del arguments["redirectOutput"]
redirecting = True
arguments["isOutputRedirected"] = redirecting
# pydevd responds to the start request
# immediately, without waiting for "configurationDone". If it changes
# to conform to the DAP spec, we'll need to defer waiting for response.
try:
self.server.channel.request(request.command, arguments)
except messaging.NoMoreMessages:
request.respond({})
self.session.finalize(
fmt(
"{0} disconnected before responding to {1!j}",
self.server,
request.command,
)
)
return
except messaging.MessageHandlingError as exc:
exc.propagate(request)
if self.session.no_debug:
self.start_request = request
self.has_started = True
request.respond({})
self._propagate_deferred_events()
return
if "clientOS" in request:
client_os = request("clientOS", json.enum("windows", "unix")).upper()
elif {"WindowsClient", "Windows"} & debug_options:
client_os = "WINDOWS"
elif {"UnixClient", "UNIX"} & debug_options:
client_os = "UNIX"
else:
client_os = "WINDOWS" if sys.platform == "win32" else "UNIX"
self.server.channel.request(
"setDebuggerProperty",
{
"skipSuspendOnBreakpointException": ("BaseException",),
"skipPrintBreakpointException": ("NameError",),
"multiThreadsSingleNotification": True,
"ideOS": client_os,
},
)
self.channel.send_event("initialized")
self.start_request = request
return messaging.NO_RESPONSE
return handle
@_start_message_handler
def launch_request(self, request):
from debugpy.adapter import launchers
if self.session.id != 1 or len(servers.connections()):
raise request.cant_handle('"attach" expected')
debug_options = set(request("debugOptions", json.array(unicode)))
def property_or_debug_option(prop_name, flag_name):
assert prop_name[0].islower() and flag_name[0].isupper()
value = request(prop_name, bool, optional=True)
if value == ():
value = None
if flag_name in debug_options:
if value is False:
raise request.isnt_valid(
'{0!j}:false and "debugOptions":[{1!j}] are mutually exclusive',
prop_name,
flag_name,
)
value = True
return value
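# For example (hypothetical request): property_or_debug_option("sudo", "Sudo")
# returns True if either "sudo": true or "debugOptions": ["Sudo"] was sent,
# and raises if "sudo": false is combined with "debugOptions": ["Sudo"].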
# "pythonPath" is a deprecated legacy spelling. If "python" is missing, then try
# the alternative. But if both are missing, the error message should say "python".
python_key = "python"
if python_key in request:
if "pythonPath" in request:
raise request.isnt_valid(
'"pythonPath" is not valid if "python" is specified'
)
elif "pythonPath" in request:
python_key = "pythonPath"
python = request(python_key, json.array(unicode, vectorize=True, size=(0,)))
if not len(python):
python = [compat.filename(sys.executable)]
python += request("pythonArgs", json.array(unicode, size=(0,)))
request.arguments["pythonArgs"] = python[1:]
request.arguments["python"] = python
launcher_python = request("debugLauncherPython", unicode, optional=True)
if launcher_python == ():
launcher_python = python[0]
program = module = code = ()
if "program" in request:
program = request("program", unicode)
args = [program]
request.arguments["processName"] = program
if "module" in request:
module = request("module", unicode)
args = ["-m", module]
request.arguments["processName"] = module
if "code" in request:
code = request("code", json.array(unicode, vectorize=True, size=(1,)))
args = ["-c", "\n".join(code)]
request.arguments["processName"] = "-c"
num_targets = len([x for x in (program, module, code) if x != ()])
if num_targets == 0:
raise request.isnt_valid(
'either "program", "module", or "code" must be specified'
)
elif num_targets != 1:
raise request.isnt_valid(
'"program", "module", and "code" are mutually exclusive'
)
# Propagate "args" via CLI if and only if shell expansion is requested.
args_expansion = request(
"argsExpansion", json.enum("shell", "none", optional=True)
)
if args_expansion == "shell":
args += request("args", json.array(unicode))
request.arguments.pop("args", None)
cwd = request("cwd", unicode, optional=True)
if cwd == ():
# If it's not specified, but we're launching a file rather than a module,
# and the specified path has a directory in it, use that.
cwd = None if program == () else (os.path.dirname(program) or None)
console = request(
"console",
json.enum(
"internalConsole",
"integratedTerminal",
"externalTerminal",
optional=True,
),
)
console_title = request("consoleTitle", json.default("Python Debug Console"))
sudo = bool(property_or_debug_option("sudo", "Sudo"))
if sudo and sys.platform == "win32":
raise request.cant_handle('"sudo":true is not supported on Windows.')
launcher_path = request("debugLauncherPath", os.path.dirname(launcher.__file__))
adapter_host = request("debugAdapterHost", "127.0.0.1")
try:
servers.serve(adapter_host)
except Exception as exc:
raise request.cant_handle(
"{0} couldn't create listener socket for servers: {1}",
self.session,
exc,
)
launchers.spawn_debuggee(
self.session,
request,
[launcher_python],
launcher_path,
adapter_host,
args,
cwd,
console,
console_title,
sudo,
)
@_start_message_handler
def attach_request(self, request):
if self.session.no_debug:
raise request.isnt_valid('"noDebug" is not supported for "attach"')
host = request("host", unicode, optional=True)
port = request("port", int, optional=True)
listen = request("listen", dict, optional=True)
connect = request("connect", dict, optional=True)
pid = request("processId", (int, unicode), optional=True)
sub_pid = request("subProcessId", int, optional=True)
if host != () or port != ():
if listen != ():
raise request.isnt_valid(
'"listen" and "host"/"port" are mutually exclusive'
)
if connect != ():
raise request.isnt_valid(
'"connect" and "host"/"port" are mutually exclusive'
)
if listen != ():
if connect != ():
raise request.isnt_valid(
'"listen" and "connect" are mutually exclusive'
)
if pid != ():
raise request.isnt_valid(
'"listen" and "processId" are mutually exclusive'
)
if sub_pid != ():
raise request.isnt_valid(
'"listen" and "subProcessId" are mutually exclusive'
)
if pid != () and sub_pid != ():
raise request.isnt_valid(
'"processId" and "subProcessId" are mutually exclusive'
)
if listen != ():
host = listen("host", "127.0.0.1")
port = listen("port", int)
adapter.access_token = None
host, port = servers.serve(host, port)
else:
host, port = servers.serve()
# If there's more than one server connection already, this is a multiprocess
# re-attach. The client doesn't know the PID, so we just connect it to the
# oldest server connection that we have.
if pid != ():
if not isinstance(pid, int):
try:
pid = int(pid)
except Exception:
raise request.isnt_valid('"processId" must be parseable as int')
debugpy_args = request("debugpyArgs", json.array(unicode))
servers.inject(pid, debugpy_args)
timeout = common.PROCESS_SPAWN_TIMEOUT
pred = lambda conn: conn.pid == pid
else:
if sub_pid == ():
pred = lambda conn: True
timeout = common.PROCESS_SPAWN_TIMEOUT if listen == () else None
else:
pred = lambda conn: conn.pid == sub_pid
timeout = 0
self.channel.send_event("debugpyWaitingForServer", {"host": host, "port": port})
conn = servers.wait_for_connection(self.session, pred, timeout)
if conn is None:
if sub_pid != ():
# To prevent the client from complaining, pretend that the "attach"
# request was successful, but that the session terminated immediately.
request.respond({})
self.session.finalize(
fmt('No known subprocess with "subProcessId":{0}', sub_pid)
)
return
raise request.cant_handle(
(
"Timed out waiting for debug server to connect."
if timeout
else "There is no debug server connected to this adapter."
),
sub_pid,
)
try:
conn.attach_to_session(self.session)
except ValueError:
request.cant_handle("{0} is already being debugged.", conn)
@message_handler
def configurationDone_request(self, request):
if self.start_request is None or self.has_started:
request.cant_handle(
'"configurationDone" is only allowed during handling of a "launch" '
'or an "attach" request'
)
try:
self.has_started = True
try:
result = self.server.channel.delegate(request)
except messaging.NoMoreMessages:
# Server closed connection before we could receive the response to
# "configurationDone" - this can happen when debuggee exits shortly
# after starting. It's not an error, but we can't do anything useful
# here at this point, either, so just bail out.
request.respond({})
self.start_request.respond({})
self.session.finalize(
fmt(
"{0} disconnected before responding to {1!j}",
self.server,
request.command,
)
)
return
else:
request.respond(result)
except messaging.MessageHandlingError as exc:
self.start_request.cant_handle(str(exc))
finally:
if self.start_request.response is None:
self.start_request.respond({})
self._propagate_deferred_events()
# Notify the client of any child processes of the debuggee that aren't already
# being debugged.
for conn in servers.connections():
if conn.server is None and conn.ppid == self.session.pid:
self.notify_of_subprocess(conn)
@message_handler
def evaluate_request(self, request):
propagated_request = self.server.channel.propagate(request)
def handle_response(response):
request.respond(response.body)
propagated_request.on_response(handle_response)
return messaging.NO_RESPONSE
@message_handler
def pause_request(self, request):
request.arguments["threadId"] = "*"
return self.server.channel.delegate(request)
@message_handler
def continue_request(self, request):
request.arguments["threadId"] = "*"
try:
return self.server.channel.delegate(request)
except messaging.NoMoreMessages:
return {"allThreadsContinued": True}
@message_handler
def debugpySystemInfo_request(self, request):
result = {"debugpy": {"version": debugpy.__version__}}
if self.server:
try:
pydevd_info = self.server.channel.request("pydevdSystemInfo")
except Exception:
# If the server has already disconnected, or couldn't handle it,
# report what we've got.
pass
else:
result.update(pydevd_info)
return result
@message_handler
def terminate_request(self, request):
self.session.finalize('client requested "terminate"', terminate_debuggee=True)
return {}
@message_handler
def disconnect_request(self, request):
terminate_debuggee = request("terminateDebuggee", bool, optional=True)
if terminate_debuggee == ():
terminate_debuggee = None
self.session.finalize('client requested "disconnect"', terminate_debuggee)
return {}
def notify_of_subprocess(self, conn):
with self.session:
if self.start_request is None or conn in self._known_subprocesses:
return
if "processId" in self.start_request.arguments:
log.warning(
"Not reporting subprocess for {0}, because the parent process "
'was attached to using "processId" rather than "port".',
self.session,
)
return
log.info("Notifying {0} about {1}.", self, conn)
body = dict(self.start_request.arguments)
self._known_subprocesses.add(conn)
for key in "processId", "listen", "preLaunchTask", "postDebugTask":
body.pop(key, None)
body["name"] = fmt("Subprocess {0}", conn.pid)
body["request"] = "attach"
body["subProcessId"] = conn.pid
for key in "args", "processName", "pythonArgs":
body.pop(key, None)
host = body.pop("host", None)
port = body.pop("port", None)
if "connect" not in body:
body["connect"] = {}
if "host" not in body["connect"]:
body["connect"]["host"] = host if host is not None else "127.0.0.1"
if "port" not in body["connect"]:
if port is None:
_, port = listener.getsockname()
body["connect"]["port"] = port
self.channel.send_event("debugpyAttach", body)
def serve(host, port):
global listener
listener = sockets.serve("Client", Client, host, port)
return listener.getsockname()
def stop_serving():
try:
listener.close()
except Exception:
log.swallow_exception(level="warning")
| true
| true
|
f701767e7eb4bd50bad3cd14a4cce3b563d834a2
| 510
|
py
|
Python
|
p03.2/double_letters.py
|
LukeBriggsDev/GCSE-Code-Tasks
|
ed696873f6f9980e32b85ab7850cef8e75d52604
|
[
"MIT"
] | null | null | null |
p03.2/double_letters.py
|
LukeBriggsDev/GCSE-Code-Tasks
|
ed696873f6f9980e32b85ab7850cef8e75d52604
|
[
"MIT"
] | null | null | null |
p03.2/double_letters.py
|
LukeBriggsDev/GCSE-Code-Tasks
|
ed696873f6f9980e32b85ab7850cef8e75d52604
|
[
"MIT"
] | null | null | null |
"""
Problem:
The function 'doubler' takes a word as input.
It should create and print
a string, where each character in the string is doubled, for example:
"test" -> "tteesstt"
Tests:
>>> doubler("test")
tteesstt
>>> doubler("original")
oorriiggiinnaall
>>> doubler("hihihi")
hhiihhiihhii
"""
import doctest
def run_tests():
doctest.testmod(verbose=True)
def doubler(word):
print(''.join([char + char for char in word]))
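# An equivalent one-liner using character multiplication (illustrative
# alternative only; not used by the doctests above):
# print(''.join(char * 2 for char in word))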
if __name__ == "__main__":
run_tests()
| 15
| 69
| 0.65098
|
import doctest
def run_tests():
doctest.testmod(verbose=True)
def doubler(word):
print(''.join([char + char for char in word]))
if __name__ == "__main__":
run_tests()
| true
| true
|
f70176d180413d27094964996f759d35f158b8ae
| 1,328
|
py
|
Python
|
idread.py
|
sdwhturbosun/pn532_spi4student
|
72af24e3e7a72811589fbc225f231330d470acad
|
[
"MIT"
] | null | null | null |
idread.py
|
sdwhturbosun/pn532_spi4student
|
72af24e3e7a72811589fbc225f231330d470acad
|
[
"MIT"
] | null | null | null |
idread.py
|
sdwhturbosun/pn532_spi4student
|
72af24e3e7a72811589fbc225f231330d470acad
|
[
"MIT"
] | null | null | null |
import binascii
import sys
import Adafruit_PN532 as PN532
# Setup how the PN532 is connected to the Raspbery Pi/BeagleBone Black.
# It is recommended to use a software SPI connection with 4 digital GPIO pins.
# Configuration for a Raspberry Pi:
CS = 8 #pn532_nss----->rpi_ce0:8
MOSI = 9 #pn532_mosi---->rpi__miso:9
MISO = 10 #pn532_miso---->rpi__mosi:10
SCLK = 11 #pn532_sck----->rpi_sclk:11
# Configuration for a BeagleBone Black:
# CS = 'P8_7'
# MOSI = 'P8_8'
# MISO = 'P8_9'
# SCLK = 'P8_10'
# Create an instance of the PN532 class.
pn532 = PN532.PN532(cs=CS, sclk=SCLK, mosi=MOSI, miso=MISO)
# Call begin to initialize communication with the PN532. Must be done before
# any other calls to the PN532!
pn532.begin()
# Get the firmware version from the chip and print it out.
ic, ver, rev, support = pn532.get_firmware_version()
print('Found PN532 with firmware version: {0}.{1}'.format(ver, rev))
# Configure PN532 to communicate with MiFare cards.
pn532.SAM_configuration()
# Main loop to detect cards and read a block.
while True:
    print('Waiting for a card; hold it near the pn532 reader...')
# Check if a card is available to read.
uid = pn532.read_passive_target()
# Try again if no card is available.
if uid is None:
continue
uid = format(binascii.hexlify(uid))
print("UID:", uid)
| 26.039216
| 78
| 0.700301
|
import binascii
import sys
import Adafruit_PN532 as PN532
CS = 8
MOSI = 9
MISO = 10
SCLK = 11
pn532 = PN532.PN532(cs=CS, sclk=SCLK, mosi=MOSI, miso=MISO)
pn532.begin()
ic, ver, rev, support = pn532.get_firmware_version()
print('Found PN532 with firmware version: {0}.{1}'.format(ver, rev))
pn532.SAM_configuration()
while True:
    print('Waiting for a card; hold it near the pn532 reader...')
uid = pn532.read_passive_target()
if uid is None:
continue
uid = format(binascii.hexlify(uid))
print("UID:", uid)
| true
| true
|
f70177f8792e21fd19853261c7c4d9f5f44b972b
| 79
|
py
|
Python
|
tests/test_cli.py
|
pbujold/macaqueModules
|
3f55ec45f691972e40cc8bd98071b7934ae24349
|
[
"MIT"
] | 1
|
2021-08-25T08:45:52.000Z
|
2021-08-25T08:45:52.000Z
|
tests/test_cli.py
|
pbujold/macaqueModules
|
3f55ec45f691972e40cc8bd98071b7934ae24349
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
pbujold/macaqueModules
|
3f55ec45f691972e40cc8bd98071b7934ae24349
|
[
"MIT"
] | null | null | null |
from macaque import cli
def test_cli_template():
assert cli.cli() is None
| 15.8
| 28
| 0.734177
|
from macaque import cli
def test_cli_template():
assert cli.cli() is None
| true
| true
|
f7017816f2ca3a9c790e346ca4d93eb29edd1df0
| 262
|
py
|
Python
|
17.05.2022/POO/parte3/webBonus/meu_site.py
|
N0N4T0/python-codes
|
ac2b884f86749a8b179ff972cdb316ec4e005b32
|
[
"MIT"
] | null | null | null |
17.05.2022/POO/parte3/webBonus/meu_site.py
|
N0N4T0/python-codes
|
ac2b884f86749a8b179ff972cdb316ec4e005b32
|
[
"MIT"
] | null | null | null |
17.05.2022/POO/parte3/webBonus/meu_site.py
|
N0N4T0/python-codes
|
ac2b884f86749a8b179ff972cdb316ec4e005b32
|
[
"MIT"
] | null | null | null |
# First of all, install Flask: pip install flask
from flask import Flask
app = Flask(__name__)
@app.route('/')
def homepage():
    return 'This is my homepage'
@app.route('/contatos')
def contatos():
    return 'These are my contacts'
app.run()
| 18.714286
| 56
| 0.694656
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def homepage():
    return 'This is my homepage'
@app.route('/contatos')
def contatos():
    return 'These are my contacts'
app.run()
| true
| true
|
f70178817daa3868f8cf3f8e570980b45d7a7bd8
| 1,183
|
py
|
Python
|
unjabberlib/formatters.py
|
adsr303/unjabber
|
159f5fc8468e51c885a97c215196241c63b42a1e
|
[
"MIT"
] | null | null | null |
unjabberlib/formatters.py
|
adsr303/unjabber
|
159f5fc8468e51c885a97c215196241c63b42a1e
|
[
"MIT"
] | null | null | null |
unjabberlib/formatters.py
|
adsr303/unjabber
|
159f5fc8468e51c885a97c215196241c63b42a1e
|
[
"MIT"
] | null | null | null |
from itertools import zip_longest
DAY = 'day'
HOUR = 'hour'
NAME = 'name'
class Formatter:
def __init__(self, indent=5 * ' '):
self.indent = indent
def append(self, text, tag=None):
raise NotImplementedError('Must override append() in derived class')
def println(self, *args):
sep = None
for a in args:
if sep:
self.append(sep)
else:
sep = ' '
if isinstance(a, str):
self.append(a)
else:
self.append(*a)
self.append('\n')
def show(self, previous, day, hour, name, text):
if day:
if previous:
self.println()
self.println((day, DAY))
if name:
if not day:
self.println()
self.println((hour, HOUR), (name, NAME))
self.show_multiline(None, text)
else:
self.show_multiline(hour, text)
def show_multiline(self, hour, text):
hh = [(hour, HOUR)] if hour else []
for h, line in zip_longest(hh, text.split('\n'), fillvalue=self.indent):
self.println(h, line)
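# A minimal concrete subclass might look like this (sketch only; it
# ignores tags, which a richer formatter would use for styling):
# class PlainFormatter(Formatter):
#     def append(self, text, tag=None):
#         print(text, end='')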
| 26.288889
| 80
| 0.50634
|
from itertools import zip_longest
DAY = 'day'
HOUR = 'hour'
NAME = 'name'
class Formatter:
def __init__(self, indent=5 * ' '):
self.indent = indent
def append(self, text, tag=None):
raise NotImplementedError('Must override append() in derived class')
def println(self, *args):
sep = None
for a in args:
if sep:
self.append(sep)
else:
sep = ' '
if isinstance(a, str):
self.append(a)
else:
self.append(*a)
self.append('\n')
def show(self, previous, day, hour, name, text):
if day:
if previous:
self.println()
self.println((day, DAY))
if name:
if not day:
self.println()
self.println((hour, HOUR), (name, NAME))
self.show_multiline(None, text)
else:
self.show_multiline(hour, text)
def show_multiline(self, hour, text):
hh = [(hour, HOUR)] if hour else []
for h, line in zip_longest(hh, text.split('\n'), fillvalue=self.indent):
self.println(h, line)
| true
| true
|
f70178a47dec7e27f047d239baba69c6694baf37
| 1,910
|
py
|
Python
|
src/prototypes/prototypes.py
|
kprzybyla/prototypes
|
e4a8eb05071a1df62ed59fd5d4e69510a9db8e8a
|
[
"MIT"
] | null | null | null |
src/prototypes/prototypes.py
|
kprzybyla/prototypes
|
e4a8eb05071a1df62ed59fd5d4e69510a9db8e8a
|
[
"MIT"
] | null | null | null |
src/prototypes/prototypes.py
|
kprzybyla/prototypes
|
e4a8eb05071a1df62ed59fd5d4e69510a9db8e8a
|
[
"MIT"
] | null | null | null |
__all__ = [
"prototype",
]
import sys
from inspect import (
signature,
)
from typing import (
TypeVar,
Callable,
)
from .exceptions import (
PrototypeError,
)
if sys.version_info >= (3, 10):
from typing import ParamSpec
else:
from typing_extensions import ParamSpec # pragma: no cover
Parameters = ParamSpec("Parameters")
ReturnType = TypeVar("ReturnType")
# noinspection PyTypeHints
def prototype(
proto: Callable[Parameters, ReturnType],
/,
*,
runtime: bool = True,
) -> Callable[Parameters, ReturnType]:
"""
The prototype decorator acts as a type protection shield
that validates the parameter specification and return
type annotation of a function against the given prototype.
If the `runtime` parameter is set to True, the decorator performs
prototype validation at runtime using the :class:`Signature`
class from the :mod:`inspect` module, comparing the function and
prototype signatures against each other.
:param proto: prototype function
:param runtime: when set to True, performs prototype validation during runtime
:raises PrototypeError:
When function has incompatible signature for given prototype.
Exception is raised only when `runtime` argument is set to True.
"""
# noinspection PyTypeHints
def decorator(func: Callable[Parameters, ReturnType], /) -> Callable[Parameters, ReturnType]:
if runtime is True:
func_signature = signature(func)
proto_signature = signature(proto)
if func_signature.parameters != proto_signature.parameters:
raise PrototypeError(func, func_signature, proto, proto_signature)
if func_signature.return_annotation != proto_signature.return_annotation:
raise PrototypeError(func, func_signature, proto, proto_signature)
return func
return decorator
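# Usage sketch (hypothetical functions; defaults must match too, since
# inspect.Parameter equality includes default values):
#
# def proto(a: int, b: str = "x") -> bool: ...
#
# @prototype(proto)
# def impl(a: int, b: str = "x") -> bool:
#     return bool(a)
#
# Changing impl's parameters or return annotation raises PrototypeError
# at decoration time when runtime=True.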
| 27.681159
| 97
| 0.704712
|
__all__ = [
"prototype",
]
import sys
from inspect import (
signature,
)
from typing import (
TypeVar,
Callable,
)
from .exceptions import (
PrototypeError,
)
if sys.version_info >= (3, 10):
from typing import ParamSpec
else:
from typing_extensions import ParamSpec
Parameters = ParamSpec("Parameters")
ReturnType = TypeVar("ReturnType")
def prototype(
proto: Callable[Parameters, ReturnType],
/,
*,
runtime: bool = True,
) -> Callable[Parameters, ReturnType]:
def decorator(func: Callable[Parameters, ReturnType], /) -> Callable[Parameters, ReturnType]:
if runtime is True:
func_signature = signature(func)
proto_signature = signature(proto)
if func_signature.parameters != proto_signature.parameters:
raise PrototypeError(func, func_signature, proto, proto_signature)
if func_signature.return_annotation != proto_signature.return_annotation:
raise PrototypeError(func, func_signature, proto, proto_signature)
return func
return decorator
| true
| true
|
f70178cbf4ef5105a30edcd3a5daa6e7f179211c
| 571
|
py
|
Python
|
chapter9_Computer-Vision/Deep-Dream/util.py
|
kumi123/pytorch-learning
|
29f5b4d53f4e72b95b3fab979b1bc496ef23674c
|
[
"MIT"
] | null | null | null |
chapter9_Computer-Vision/Deep-Dream/util.py
|
kumi123/pytorch-learning
|
29f5b4d53f4e72b95b3fab979b1bc496ef23674c
|
[
"MIT"
] | null | null | null |
chapter9_Computer-Vision/Deep-Dream/util.py
|
kumi123/pytorch-learning
|
29f5b4d53f4e72b95b3fab979b1bc496ef23674c
|
[
"MIT"
] | null | null | null |
import PIL.Image
from io import BytesIO
from IPython.display import clear_output, Image, display
import numpy as np
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 255))
f = BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
def showtensor(a):
mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])
std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])
inp = a[0, :, :, :]
inp = inp.transpose(1, 2, 0)
inp = std * inp + mean
inp *= 255
showarray(inp)
clear_output(wait=True)
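# Typical call (assumes an ImageNet-normalized NCHW numpy array, e.g.
# obtained from a torch tensor of shape (1, 3, H, W)):
#   showtensor(img.detach().cpu().numpy())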
| 24.826087
| 61
| 0.607706
|
import PIL.Image
from io import BytesIO
from IPython.display import clear_output, Image, display
import numpy as np
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 255))
f = BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
def showtensor(a):
mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])
std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])
inp = a[0, :, :, :]
inp = inp.transpose(1, 2, 0)
inp = std * inp + mean
inp *= 255
showarray(inp)
clear_output(wait=True)
| true
| true
|
f70179383f15ccce81946c10265dbb68c2fdd06a
| 2,084
|
py
|
Python
|
novelsave/settings.py
|
damare01/novelsave
|
7896e8393c944e169e3cb52a33ab81ae396dff9f
|
[
"Apache-2.0"
] | 12
|
2021-08-25T04:37:53.000Z
|
2022-01-28T03:06:17.000Z
|
novelsave/settings.py
|
damare01/novelsave
|
7896e8393c944e169e3cb52a33ab81ae396dff9f
|
[
"Apache-2.0"
] | 18
|
2021-08-24T20:02:16.000Z
|
2022-03-29T06:55:53.000Z
|
novelsave/settings.py
|
damare01/novelsave
|
7896e8393c944e169e3cb52a33ab81ae396dff9f
|
[
"Apache-2.0"
] | 6
|
2021-10-03T11:31:08.000Z
|
2022-03-29T07:28:49.000Z
|
import mimetypes
from pathlib import Path
from appdirs import user_config_dir
from tqdm import tqdm
NAME = "novelsave"
AUTHOR = "Mensch272"
# base project directory
BASE_DIR = Path(__file__).resolve().parent.parent
STATIC_DIR = BASE_DIR / "novelsave/resources"
# operating system specific configuration file
# config directory is used to place logs, config, cache
CONFIG_DIR = Path(user_config_dir(NAME, AUTHOR))
CONFIG_FILE = CONFIG_DIR / "config.json"
DATA_DIR = CONFIG_DIR / "data"
DATABASE_FILE = (CONFIG_DIR / "data.sqlite").resolve()
DATABASE_URL = "sqlite:///" + str(DATABASE_FILE)
# default novel directory, where packaged files such
# as epub and pdf are stored.
NOVEL_DIR = Path.home() / "novels"
# the following map defines how files are stored
# by further subdivision into sub-folders
DIVISION_RULES = {
k: v.split("/", maxsplit=1)[0] for k, v in mimetypes.types_map.items()
}
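# For illustration, a few resulting entries of this map:
#   ".png" -> "image", ".json" -> "application", ".mp3" -> "audio"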
def console_formatter(record):
if record["level"].name == "INFO":
return "{message}\n"
else:
return "<level>{level}: {message}</level>\n"
LOGGER_CONFIG = {
"handlers": [
{
"sink": lambda msg: tqdm.write(msg, end=""),
"format": console_formatter,
"level": "INFO",
"colorize": True,
"backtrace": False,
"diagnose": False,
},
{
"sink": CONFIG_DIR / "logs" / "{time}.log",
"level": "TRACE",
"retention": "2 days",
"compression": "zip",
"encoding": "utf-8",
},
],
}
TQDM_CONFIG = {"ncols": 80, "bar_format": "{percentage:3.0f}% |{bar}{r_bar}"}
config = {
"name": NAME,
"author": AUTHOR,
"base_dir": BASE_DIR,
"static": {
"dir": STATIC_DIR,
},
"config": {
"dir": CONFIG_DIR,
"file": CONFIG_FILE,
},
"data": {
"dir": DATA_DIR,
"division_rules": DIVISION_RULES,
},
"novel": {
"dir": NOVEL_DIR,
},
"infrastructure": {
"database": {
"url": DATABASE_URL,
}
},
}
| 23.41573
| 77
| 0.580134
|
import mimetypes
from pathlib import Path
from appdirs import user_config_dir
from tqdm import tqdm
NAME = "novelsave"
AUTHOR = "Mensch272"
BASE_DIR = Path(__file__).resolve().parent.parent
STATIC_DIR = BASE_DIR / "novelsave/resources"
CONFIG_DIR = Path(user_config_dir(NAME, AUTHOR))
CONFIG_FILE = CONFIG_DIR / "config.json"
DATA_DIR = CONFIG_DIR / "data"
DATABASE_FILE = (CONFIG_DIR / "data.sqlite").resolve()
DATABASE_URL = "sqlite:///" + str(DATABASE_FILE)
NOVEL_DIR = Path.home() / "novels"
DIVISION_RULES = {
k: v.split("/", maxsplit=1)[0] for k, v in mimetypes.types_map.items()
}
def console_formatter(record):
if record["level"].name == "INFO":
return "{message}\n"
else:
return "<level>{level}: {message}</level>\n"
LOGGER_CONFIG = {
"handlers": [
{
"sink": lambda msg: tqdm.write(msg, end=""),
"format": console_formatter,
"level": "INFO",
"colorize": True,
"backtrace": False,
"diagnose": False,
},
{
"sink": CONFIG_DIR / "logs" / "{time}.log",
"level": "TRACE",
"retention": "2 days",
"compression": "zip",
"encoding": "utf-8",
},
],
}
TQDM_CONFIG = {"ncols": 80, "bar_format": "{percentage:3.0f}% |{bar}{r_bar}"}
config = {
"name": NAME,
"author": AUTHOR,
"base_dir": BASE_DIR,
"static": {
"dir": STATIC_DIR,
},
"config": {
"dir": CONFIG_DIR,
"file": CONFIG_FILE,
},
"data": {
"dir": DATA_DIR,
"division_rules": DIVISION_RULES,
},
"novel": {
"dir": NOVEL_DIR,
},
"infrastructure": {
"database": {
"url": DATABASE_URL,
}
},
}
| true
| true
|
f70179c22847bf238ffe9a36a60ad35690b1309c
| 1,775
|
py
|
Python
|
newschimp/core.py
|
sputnikus/newschimp
|
8de77e4eb8054ea599b73f9c99fb494818736445
|
[
"MIT"
] | 2
|
2016-03-11T01:14:27.000Z
|
2016-09-13T16:49:15.000Z
|
newschimp/core.py
|
sputnikus/newschimp
|
8de77e4eb8054ea599b73f9c99fb494818736445
|
[
"MIT"
] | 3
|
2021-03-22T17:12:24.000Z
|
2021-12-13T19:39:41.000Z
|
newschimp/core.py
|
sputnikus/newschimp
|
8de77e4eb8054ea599b73f9c99fb494818736445
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import sys
import click
from newschimp import renderer, sender
from newschimp.social import fb, gg, lanyrd
from newschimp.cli import cli_group
from newschimp.utils import ComplexCLI, load_settings
LOGGER = logging.getLogger(__name__)
def create_newsletter(settings):
"""Newsletter creation based on config and env variables"""
context = {}
try:
fb_posts = fb.get_posts(settings, os.environ['FACEBOOK_TOKEN'], None)
except KeyError:
LOGGER.error('Facebook Token not defined')
sys.exit()
click.echo('[1/4] Getting Facebook Group posts')
context['fb'] = fb.curate(fb_posts)
ggroup_posts = gg.get_posts(settings, None)
click.echo('[2/4] Getting Google Group posts')
context['gg'] = gg.curate(ggroup_posts)
click.echo('[3/4] Getting upcoming Lanyrd meetups')
context['meetups'] = lanyrd.meetup_loop(settings)
click.echo('[4/4] Rendering mail')
renderer.render_files(settings, None, context)
click.confirm(
'Content is rendered, would you like to send it now?', abort=True)
click.echo('Creating MailChimp campaign')
sender.new_campaign(settings, os.environ.get('MAILCHIMP_KEY'))
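    # Note: the full flow requires FACEBOOK_TOKEN in the environment (it
    # exits otherwise) and MAILCHIMP_KEY for the final MailChimp step,
    # as read via os.environ above.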
cli_group.add_command(fb.cli)
cli_group.add_command(gg.cli)
cli_group.add_command(lanyrd.cli)
@cli_group.command(cls=ComplexCLI, invoke_without_command=True)
@click.option('--config', help='Custom config file', type=click.Path(
exists=True, file_okay=True, resolve_path=True), default='config.yaml')
@click.pass_context
def main(ctx, config):
ctx.obj['SETTINGS'] = load_settings(config)
if ctx.invoked_subcommand is None:
create_newsletter(ctx.obj['SETTINGS'])
if __name__ == '__main__':
main(obj={})
| 31.696429
| 77
| 0.71662
|
import logging
import os
import sys
import click
from newschimp import renderer, sender
from newschimp.social import fb, gg, lanyrd
from newschimp.cli import cli_group
from newschimp.utils import ComplexCLI, load_settings
LOGGER = logging.getLogger(__name__)
def create_newsletter(settings):
context = {}
try:
fb_posts = fb.get_posts(settings, os.environ['FACEBOOK_TOKEN'], None)
except KeyError:
LOGGER.error('Facebook Token not defined')
sys.exit()
click.echo('[1/4] Getting Facebook Group posts')
context['fb'] = fb.curate(fb_posts)
ggroup_posts = gg.get_posts(settings, None)
click.echo('[2/4] Getting Google Group posts')
context['gg'] = gg.curate(ggroup_posts)
click.echo('[3/4] Getting upcoming Lanyrd meetups')
context['meetups'] = lanyrd.meetup_loop(settings)
click.echo('[4/4] Rendering mail')
renderer.render_files(settings, None, context)
click.confirm(
'Content is rendered, would you like to send it now?', abort=True)
click.echo('Creating MailChimp campaign')
sender.new_campaign(settings, os.environ.get('MAILCHIMP_KEY'))
cli_group.add_command(fb.cli)
cli_group.add_command(gg.cli)
cli_group.add_command(lanyrd.cli)
@cli_group.command(cls=ComplexCLI, invoke_without_command=True)
@click.option('--config', help='Custom config file', type=click.Path(
exists=True, file_okay=True, resolve_path=True), default='config.yaml')
@click.pass_context
def main(ctx, config):
ctx.obj['SETTINGS'] = load_settings(config)
if ctx.invoked_subcommand is None:
create_newsletter(ctx.obj['SETTINGS'])
if __name__ == '__main__':
main(obj={})
| true
| true
|
f70179f8b9a5ed39f8e3e2daa4c9d420a7b278e5
| 1,044
|
py
|
Python
|
setup.py
|
vladiscripts/flightaware
|
bc3d25667475c8efbf6603cd93151e7e03b6d1b4
|
[
"MIT"
] | 1
|
2016-09-28T12:56:06.000Z
|
2016-09-28T12:56:06.000Z
|
setup.py
|
vladiscripts/flightaware
|
bc3d25667475c8efbf6603cd93151e7e03b6d1b4
|
[
"MIT"
] | null | null | null |
setup.py
|
vladiscripts/flightaware
|
bc3d25667475c8efbf6603cd93151e7e03b6d1b4
|
[
"MIT"
] | 1
|
2020-06-23T06:15:52.000Z
|
2020-06-23T06:15:52.000Z
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
PACKAGE = "flightaware"
NAME = "flightaware"
DESCRIPTION = "A python REST interface for flightaware data"
AUTHOR = "Fred Palmer"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/fredpalmer/flightaware"
config = {
"description": DESCRIPTION,
"author": AUTHOR,
"url": URL,
"author_email": AUTHOR_EMAIL,
"version": "0.1",
"install_requires": [
"requests>=2.0.0",
"pytz"
],
"keywords": "travel flightaware airline flight flight-tracking flight-data",
"classifiers": [
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
],
"packages": [PACKAGE, ],
"scripts": [],
"name": NAME,
"license": "MIT",
}
setup(**config)
| 26.1
| 80
| 0.62069
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
PACKAGE = "flightaware"
NAME = "flightaware"
DESCRIPTION = "A python REST interface for flightaware data"
AUTHOR = "Fred Palmer"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/fredpalmer/flightaware"
config = {
"description": DESCRIPTION,
"author": AUTHOR,
"url": URL,
"author_email": AUTHOR_EMAIL,
"version": "0.1",
"install_requires": [
"requests>=2.0.0",
"pytz"
],
"keywords": "travel flightaware airline flight flight-tracking flight-data",
"classifiers": [
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
],
"packages": [PACKAGE, ],
"scripts": [],
"name": NAME,
"license": "MIT",
}
setup(**config)
| true
| true
|
f7017a0b4139dd7ff8f485a857901a8d0d68104d
| 268
|
py
|
Python
|
tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_30/ar_12/test_artificial_32_Quantization_Lag1Trend_30_12_20.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_30/ar_12/test_artificial_32_Quantization_Lag1Trend_30_12_20.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_30/ar_12/test_artificial_32_Quantization_Lag1Trend_30_12_20.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 12);
| 38.285714
| 168
| 0.735075
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 12);
| true
| true
|
f7017af6c3cdf3b84c5e4f104a6d25cb9b08d77a
| 9,779
|
py
|
Python
|
lithops/storage/backends/swift/swift.py
|
Damian-MG/lithops
|
d2d2a83527671d64a445411bc47843b308095b87
|
[
"Apache-2.0"
] | 55
|
2018-04-23T09:58:56.000Z
|
2020-09-09T11:47:16.000Z
|
lithops/storage/backends/swift/swift.py
|
Damian-MG/lithops
|
d2d2a83527671d64a445411bc47843b308095b87
|
[
"Apache-2.0"
] | 256
|
2018-05-20T13:01:51.000Z
|
2020-09-16T09:09:54.000Z
|
lithops/storage/backends/swift/swift.py
|
Damian-MG/lithops
|
d2d2a83527671d64a445411bc47843b308095b87
|
[
"Apache-2.0"
] | 35
|
2018-04-23T09:07:57.000Z
|
2020-08-12T13:43:06.000Z
|
#
# (C) Copyright IBM Corp. 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import shutil
import logging
import requests
from lithops.storage.utils import StorageNoSuchKeyError
from lithops.utils import sizeof_fmt
from lithops.constants import STORAGE_CLI_MSG
logger = logging.getLogger(__name__)
class StorageBackend:
"""
A wrap-up around OpenStack Swift APIs.
"""
def __init__(self, swift_config):
logger.debug("Creating OpenStack Swift client")
self.auth_url = swift_config['swift_auth_url']
self.user_id = swift_config['swift_user_id']
self.project_id = swift_config['swift_project_id']
self.password = swift_config['swift_password']
self.region = swift_config['swift_region']
self.endpoint = None
if 'token' in swift_config:
self.token = swift_config['token']
self.endpoint = swift_config['endpoint']
else:
self.token = self.generate_swift_token()
swift_config['token'] = self.token
swift_config['endpoint'] = self.endpoint
self.session = requests.session()
self.session.headers.update({'X-Auth-Token': self.token})
adapter = requests.adapters.HTTPAdapter(pool_maxsize=64, max_retries=3)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
msg = STORAGE_CLI_MSG.format('OpenStack Swift')
logger.info("{} - Region: {}".format(msg, self.region))
def generate_swift_token(self):
"""
Generates new token for accessing to Swift.
:return: token
"""
url = self.auth_url+"/v3/auth/tokens"
headers = {'Content-Type': 'application/json'}
data = {"auth": {"identity": {"methods": ["password"],
"password": {"user": {"id": self.user_id, "password": self.password}}},
"scope": {"project": {"id": self.project_id}}}}
json_data = json.dumps(data)
r = requests.post(url, data=json_data, headers=headers)
if r.status_code == 201:
backend_info = json.loads(r.text)
for service in backend_info['token']['catalog']:
if service['name'] == 'swift':
for endpoint in service['endpoints']:
if endpoint['region'] == self.region:
if endpoint['interface'] == 'public':
self.endpoint = endpoint['url'].replace('https:', 'http:')
if not self.endpoint:
raise Exception('Invalid region name')
return r.headers['X-Subject-Token']
else:
message = json.loads(r.text)['error']['message']
raise Exception("{} - {} - {}".format(r.status_code, r.reason, message))
def put_object(self, container_name, key, data):
"""
Put an object in Swift. Override the object if the key already exists.
:param key: key of the object.
:param data: data of the object
:type data: str/bytes
:return: None
"""
url = '/'.join([self.endpoint, container_name, key])
try:
res = self.session.put(url, data=data)
status = 'OK' if res.status_code == 201 else 'Error'
try:
logger.debug('PUT Object {} - Size: {} - {}'.format(key, sizeof_fmt(len(data)), status))
except Exception:
logger.debug('PUT Object {} - {}'.format(key, status))
except Exception as e:
print(e)
def get_object(self, container_name, key, stream=False, extra_get_args={}):
"""
Get object from Swift with a key. Throws StorageNoSuchKeyError if the given key does not exist.
:param key: key of the object
:return: Data of the object
:rtype: str/bytes
"""
if not container_name:
container_name = self.storage_container
url = '/'.join([self.endpoint, container_name, key])
headers = {'X-Auth-Token': self.token}
headers.update(extra_get_args)
try:
res = self.session.get(url, headers=headers, stream=stream)
if res.status_code == 200 or res.status_code == 206:
if stream:
data = res.raw
else:
data = res.content
return data
elif res.status_code == 404:
raise StorageNoSuchKeyError(container_name, key)
else:
raise Exception('{} - {}'.format(res.status_code, key))
except StorageNoSuchKeyError:
raise StorageNoSuchKeyError(container_name, key)
except Exception as e:
print(e)
raise StorageNoSuchKeyError(container_name, key)
def upload_file(self, file_name, bucket, key=None, extra_args={}):
"""Upload a file
:param file_name: File to upload
:param bucket: Bucket to upload to
:param key: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 key was not specified, use file_name
if key is None:
key = os.path.basename(file_name)
# Upload the file
try:
with open(file_name, 'rb') as in_file:
self.put_object(bucket, key, in_file)
except Exception as e:
logging.error(e)
return False
return True
def download_file(self, bucket, key, file_name=None, extra_args={}):
"""Download a file
:param bucket: Bucket to download from
:param key: S3 object name. If not specified then file_name is used
:param file_name: File to upload
:return: True if file was downloaded, else False
"""
# If file_name was not specified, use S3 key
if file_name is None:
file_name = key
# Download the file
try:
dirname = os.path.dirname(file_name)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(file_name, 'wb') as out:
data_stream = self.get_object(bucket, key, stream=True)
shutil.copyfileobj(data_stream, out)
except Exception as e:
logging.error(e)
return False
return True
def head_object(self, container_name, key):
"""
Head object from Swift with a key. Throws StorageNoSuchKeyError if the given key does not exist.
:param key: key of the object
:return: Headers of the object
:rtype: mapping of str to str
"""
url = '/'.join([self.endpoint, container_name, key])
try:
res = self.session.head(url)
if res.status_code == 200:
return res.headers
elif res.status_code == 404:
raise StorageNoSuchKeyError(container_name, key)
else:
raise Exception('{} - {}'.format(res.status_code, key))
except Exception as e:
raise StorageNoSuchKeyError(container_name, key)
def delete_object(self, container_name, key):
"""
Delete an object from Swift.
:param bucket: bucket name
:param key: data key
"""
url = '/'.join([self.endpoint, container_name, key])
return self.session.delete(url)
def delete_objects(self, container_name, key_list):
"""
Delete a list of objects from Swift.
:param bucket: bucket name
:param key: data key
"""
headers={'X-Auth-Token': self.token,
'X-Bulk-Delete': 'True'}
keys_to_delete = []
for key in key_list:
keys_to_delete.append('/{}/{}'.format(container_name, key))
keys_to_delete = '\n'.join(keys_to_delete)
url = '/'.join([self.endpoint, '?bulk-delete'])
return self.session.delete(url, data=keys_to_delete, headers=headers)
def list_objects(self, container_name, prefix=''):
"""
Lists the objects in a container. Throws StorageNoSuchKeyError if the given container does not exist.
:param prefix: prefix to filter object names
:return: List of objects that match the given prefix
:rtype: list of dict
"""
if prefix:
url = '/'.join([self.endpoint, container_name, '?format=json&prefix='+prefix])
else:
url = '/'.join([self.endpoint, container_name, '?format=json'])
try:
res = self.session.get(url)
objects = res.json()
# TODO: Adapt to Key and Size
return objects
except Exception as e:
raise e
def list_keys(self, container_name, prefix):
"""
Return a list of keys for the given prefix.
:param prefix: Prefix to filter object names.
:return: List of keys in bucket that match the given prefix.
:rtype: list of str
"""
try:
objects = self.list_objects(container_name, prefix)
object_keys = [r['name'] for r in objects]
return object_keys
except Exception as e:
raise e
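# Usage sketch (hypothetical credentials; config keys as consumed in __init__):
# swift = StorageBackend({
#     'swift_auth_url': 'https://keystone.example.com',
#     'swift_user_id': '...', 'swift_project_id': '...',
#     'swift_password': '...', 'swift_region': 'RegionOne',
# })
# swift.put_object('my-container', 'data/key.txt', b'payload')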
| 36.901887
| 109
| 0.583495
|
import os
import json
import shutil
import logging
import requests
from lithops.storage.utils import StorageNoSuchKeyError
from lithops.utils import sizeof_fmt
from lithops.constants import STORAGE_CLI_MSG
logger = logging.getLogger(__name__)
class StorageBackend:
def __init__(self, swift_config):
logger.debug("Creating OpenStack Swift client")
self.auth_url = swift_config['swift_auth_url']
self.user_id = swift_config['swift_user_id']
self.project_id = swift_config['swift_project_id']
self.password = swift_config['swift_password']
self.region = swift_config['swift_region']
self.endpoint = None
if 'token' in swift_config:
self.token = swift_config['token']
self.endpoint = swift_config['endpoint']
else:
self.token = self.generate_swift_token()
swift_config['token'] = self.token
swift_config['endpoint'] = self.endpoint
self.session = requests.session()
self.session.headers.update({'X-Auth-Token': self.token})
adapter = requests.adapters.HTTPAdapter(pool_maxsize=64, max_retries=3)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
msg = STORAGE_CLI_MSG.format('OpenStack Swift')
logger.info("{} - Region: {}".format(msg, self.region))
def generate_swift_token(self):
url = self.auth_url+"/v3/auth/tokens"
headers = {'Content-Type': 'application/json'}
data = {"auth": {"identity": {"methods": ["password"],
"password": {"user": {"id": self.user_id, "password": self.password}}},
"scope": {"project": {"id": self.project_id}}}}
json_data = json.dumps(data)
r = requests.post(url, data=json_data, headers=headers)
if r.status_code == 201:
backend_info = json.loads(r.text)
for service in backend_info['token']['catalog']:
if service['name'] == 'swift':
for endpoint in service['endpoints']:
if endpoint['region'] == self.region:
if endpoint['interface'] == 'public':
self.endpoint = endpoint['url'].replace('https:', 'http:')
if not self.endpoint:
raise Exception('Invalid region name')
return r.headers['X-Subject-Token']
else:
message = json.loads(r.text)['error']['message']
raise Exception("{} - {} - {}".format(r.status_code, r.reason, message))
def put_object(self, container_name, key, data):
url = '/'.join([self.endpoint, container_name, key])
try:
res = self.session.put(url, data=data)
status = 'OK' if res.status_code == 201 else 'Error'
try:
logger.debug('PUT Object {} - Size: {} - {}'.format(key, sizeof_fmt(len(data)), status))
except Exception:
logger.debug('PUT Object {} - {}'.format(key, status))
except Exception as e:
print(e)
def get_object(self, container_name, key, stream=False, extra_get_args={}):
if not container_name:
container_name = self.storage_container
url = '/'.join([self.endpoint, container_name, key])
headers = {'X-Auth-Token': self.token}
headers.update(extra_get_args)
try:
res = self.session.get(url, headers=headers, stream=stream)
if res.status_code == 200 or res.status_code == 206:
if stream:
data = res.raw
else:
data = res.content
return data
elif res.status_code == 404:
raise StorageNoSuchKeyError(container_name, key)
else:
raise Exception('{} - {}'.format(res.status_code, key))
except StorageNoSuchKeyError:
raise StorageNoSuchKeyError(container_name, key)
except Exception as e:
print(e)
raise StorageNoSuchKeyError(container_name, key)
def upload_file(self, file_name, bucket, key=None, extra_args={}):
if key is None:
key = os.path.basename(file_name)
try:
with open(file_name, 'rb') as in_file:
self.put_object(bucket, key, in_file)
except Exception as e:
logging.error(e)
return False
return True
def download_file(self, bucket, key, file_name=None, extra_args={}):
if file_name is None:
file_name = key
try:
dirname = os.path.dirname(file_name)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(file_name, 'wb') as out:
data_stream = self.get_object(bucket, key, stream=True)
shutil.copyfileobj(data_stream, out)
except Exception as e:
logging.error(e)
return False
return True
def head_object(self, container_name, key):
url = '/'.join([self.endpoint, container_name, key])
try:
res = self.session.head(url)
if res.status_code == 200:
return res.headers
elif res.status_code == 404:
raise StorageNoSuchKeyError(container_name, key)
else:
raise Exception('{} - {}'.format(res.status_code, key))
except Exception as e:
raise StorageNoSuchKeyError(container_name, key)
def delete_object(self, container_name, key):
url = '/'.join([self.endpoint, container_name, key])
return self.session.delete(url)
def delete_objects(self, container_name, key_list):
        headers = {'X-Auth-Token': self.token,
                   'X-Bulk-Delete': 'True'}
keys_to_delete = []
for key in key_list:
keys_to_delete.append('/{}/{}'.format(container_name, key))
keys_to_delete = '\n'.join(keys_to_delete)
url = '/'.join([self.endpoint, '?bulk-delete'])
return self.session.delete(url, data=keys_to_delete, headers=headers)
def list_objects(self, container_name, prefix=''):
if prefix:
url = '/'.join([self.endpoint, container_name, '?format=json&prefix='+prefix])
else:
url = '/'.join([self.endpoint, container_name, '?format=json'])
try:
res = self.session.get(url)
objects = res.json()
return objects
except Exception as e:
raise e
def list_keys(self, container_name, prefix):
try:
objects = self.list_objects(container_name, prefix)
object_keys = [r['name'] for r in objects]
return object_keys
except Exception as e:
            raise e
| true
| true
|
f7017b8db9e3fad17b386ca08b520a4ce52d409f
| 7,307
|
py
|
Python
|
rpcore/render_stage.py
|
serkkz/RenderPipeline
|
cecd14632150f607c0a89401287f74c68d77b15d
|
[
"MIT"
] | null | null | null |
rpcore/render_stage.py
|
serkkz/RenderPipeline
|
cecd14632150f607c0a89401287f74c68d77b15d
|
[
"MIT"
] | null | null | null |
rpcore/render_stage.py
|
serkkz/RenderPipeline
|
cecd14632150f607c0a89401287f74c68d77b15d
|
[
"MIT"
] | null | null | null |
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from rpcore.render_target import RenderTarget
from rpcore.loader import RPLoader
class RenderStage():
""" This class is the abstract class for all stages used in the pipeline.
It represents a part of the pipeline render process. Each stage specifies
which pipes it uses and which pipes it produces. A pipe can be seen as a
texture, which gets modified. E.g. the gbuffer pass produces the gbuffer
pipe, the ambient occlusion pass produces the occlusion pipe and so on. The
lighting pass can then specify which pipes it needs and compute the image.
Using a pipe system ensures that new techniques can be inserted easily,
without the other techniques even being aware of them """
required_inputs = []
required_pipes = []
produced_inputs = {}
produced_pipes = {}
produced_defines = {}
disabled = False
def __init__(self, pipeline):
""" Creates a new render stage """
self.stage_id = self.__class__.__name__
self._pipeline = pipeline
self._active = True
self._targets = {}
def create(self):
""" This method should setup the stage and create the pipes """
raise NotImplementedError()
def reload_shaders(self):
""" This method should set all required shaders, there should be no
shaders set in the create method, because the shader auto config is not
generated there """
pass
def set_shader_input(self, *args):
""" This method sets a shader input on all stages, which is mainly used
by the stage manager """
for target in self._targets.values():
target.set_shader_input(*args)
def set_shader_inputs(self, **kwargs):
""" This method sets shader inputs on all stages, which is mainly used
by the stage manager """
for target in self._targets.values():
target.set_shader_inputs(**kwargs)
def update(self):
""" This method gets called every frame, and can be overridden by render
stages to perform custom updates """
pass
@property
def active(self):
""" Returns whether *all* targets of the stage are active """
return self._active
@active.setter
def active(self, state):
""" Enables or disables this stage. In case the stage is disabled, it will
not get updated anymore, and all stages are distabled """
if self._active != state:
self._active = state
for target in self._targets.values():
target.active = self._active
def create_target(self, name):
""" Creates a new render target and binds it to this stage """
        # Format the name like Plugin:Stage:Name, so it can be easily
        # found in pstats below the plugin category
name = self._get_plugin_id() + ":" + self.stage_id + ":" + name
if name in self._targets:
return self.error("Overriding existing target: " + name)
self._targets[name] = RenderTarget(name)
return self._targets[name]
def remove_target(self, target):
""" Removes a previously registered target. This unregisters the
target, as well as removing it from the list of assigned targets. """
target.remove()
target_key = None
for key, value_target in self._targets.items():
if target == value_target:
target_key = key
break
del self._targets[target_key]
def _get_shader_handle(self, path, *args):
""" Returns a handle to a Shader object, containing all sources passed
as arguments. The path argument will be used to locate shaders if no
absolute path is given. This is the internal method used in load_shader
and load_plugin_shader. """
assert len(args) > 0 and len(args) <= 3
path_args = []
for source in args:
for prefix in ("/$$rpconfig", "/$$rp/shader", "/$$rptemp"):
if prefix in source:
path_args.append(source)
break
else:
path_args.append(path.format(source))
# If only one shader is specified, assume its a postprocess fragment shader,
# and use the default vertex shader
if len(args) == 1:
path_args = ["/$$rp/shader/default_post_process.vert.glsl"] + path_args
return RPLoader.load_shader(*path_args)
def _get_plugin_id(self):
""" Returns the id of the plugin which created this stage. This is done
by extracting the name of the plugin from the module name """
if "rpcore.stages" in self.__class__.__module__:
return "render_pipeline_internal"
return str(self.__class__.__module__).split(".")[-2]
def load_shader(self, *args):
""" Loads a shader from the given args. If only one argument is passed,
the default template for the stage is loaded. If two arguments are
passed, the first argument should be the vertex shader and the second
argument should be the fragment shader. If three arguments are passed,
the order should be vertex, fragment, geometry """
return self._get_shader_handle("/$$rp/shader/{0}", *args)
def load_plugin_shader(self, *args):
""" Loads a shader from the plugin directory. This method is useful
for RenderStages created by plugins. For a description of the arguments,
see the load_shader function. """
shader_path = "rpplugins/" + self._get_plugin_id() + "/shader/{0}"
return self._get_shader_handle(shader_path, *args)
def handle_window_resize(self):
""" This method gets called when the window gets resized. By default,
this just resizes all render targets. """
self.set_dimensions()
for target in self._targets.values():
target.consider_resize()
def set_dimensions(self):
""" This method should set the dimensions on all targets which don't
have a relative constraint, and also the size of all images. This
is called after initialization, and when the window resized. """
pass
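# A minimal sketch of a derived stage, illustrating the create /
# reload_shaders split described in the class docstring; the specific
# RenderTarget calls and the shader file name are illustrative
# assumptions, not part of the original module.
class ExampleStage(RenderStage):
    """ Demo stage that consumes the GBuffer and produces one pipe """
    required_pipes = ["GBuffer"]
    required_inputs = []

    def create(self):
        # Targets are created here; shaders are assigned later in
        # reload_shaders, once the shader auto-config exists
        self._target = self.create_target("Example")
        self._target.add_color_attachment(bits=16)
        self._target.prepare_buffer()

    @property
    def produced_pipes(self):
        return {"ExampleResult": self._target.color_tex}

    def reload_shaders(self):
        self._target.shader = self.load_shader("example.frag.glsl")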
| 41.517045
| 84
| 0.66799
|
from rpcore.render_target import RenderTarget
from rpcore.loader import RPLoader
class RenderStage():
required_inputs = []
required_pipes = []
produced_inputs = {}
produced_pipes = {}
produced_defines = {}
disabled = False
def __init__(self, pipeline):
self.stage_id = self.__class__.__name__
self._pipeline = pipeline
self._active = True
self._targets = {}
def create(self):
raise NotImplementedError()
def reload_shaders(self):
pass
def set_shader_input(self, *args):
for target in self._targets.values():
target.set_shader_input(*args)
def set_shader_inputs(self, **kwargs):
for target in self._targets.values():
target.set_shader_inputs(**kwargs)
def update(self):
pass
@property
def active(self):
return self._active
@active.setter
def active(self, state):
if self._active != state:
self._active = state
for target in self._targets.values():
target.active = self._active
def create_target(self, name):
name = self._get_plugin_id() + ":" + self.stage_id + ":" + name
if name in self._targets:
return self.error("Overriding existing target: " + name)
self._targets[name] = RenderTarget(name)
return self._targets[name]
def remove_target(self, target):
target.remove()
target_key = None
for key, value_target in self._targets.items():
if target == value_target:
target_key = key
break
del self._targets[target_key]
def _get_shader_handle(self, path, *args):
assert len(args) > 0 and len(args) <= 3
path_args = []
for source in args:
for prefix in ("/$$rpconfig", "/$$rp/shader", "/$$rptemp"):
if prefix in source:
path_args.append(source)
break
else:
path_args.append(path.format(source))
if len(args) == 1:
path_args = ["/$$rp/shader/default_post_process.vert.glsl"] + path_args
return RPLoader.load_shader(*path_args)
def _get_plugin_id(self):
if "rpcore.stages" in self.__class__.__module__:
return "render_pipeline_internal"
return str(self.__class__.__module__).split(".")[-2]
def load_shader(self, *args):
return self._get_shader_handle("/$$rp/shader/{0}", *args)
def load_plugin_shader(self, *args):
shader_path = "rpplugins/" + self._get_plugin_id() + "/shader/{0}"
return self._get_shader_handle(shader_path, *args)
def handle_window_resize(self):
self.set_dimensions()
for target in self._targets.values():
target.consider_resize()
def set_dimensions(self):
pass
| true
| true
|
f7017bfdb0cd9b92d160e04e3ef909806fba71cd
| 5,304
|
py
|
Python
|
tests/integration/actions/inventory/base.py
|
LaudateCorpus1/ansible-navigator
|
28cdea13dba3e9039382eb993989db4b3e61b237
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/actions/inventory/base.py
|
LaudateCorpus1/ansible-navigator
|
28cdea13dba3e9039382eb993989db4b3e61b237
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/actions/inventory/base.py
|
LaudateCorpus1/ansible-navigator
|
28cdea13dba3e9039382eb993989db4b3e61b237
|
[
"Apache-2.0"
] | null | null | null |
"""Base class for inventory interactive/stdout tests.
"""
import difflib
import json
import os
import pytest
from ....defaults import FIXTURES_DIR
from ..._common import fixture_path_from_request
from ..._common import update_fixtures
from ..._interactions import SearchFor
from ..._interactions import Step
from ..._tmux_session import TmuxSession
TEST_FIXTURE_DIR = os.path.join(FIXTURES_DIR, "integration", "actions", "inventory")
ANSIBLE_INVENTORY_FIXTURE_DIR = os.path.join(TEST_FIXTURE_DIR, "ansible_inventory", "inventory.yml")
TEST_CONFIG_FILE = os.path.join(TEST_FIXTURE_DIR, "ansible-navigator.yml")
base_steps = (
Step(user_input=":0", comment="Browse hosts/ungrouped window"),
Step(user_input=":0", comment="Group list window"),
Step(user_input=":0", comment="group01 hosts detail window"),
Step(user_input=":0", comment="host0101 detail window"),
Step(user_input=":back", comment="Previous window (group01 hosts detail window)"),
Step(user_input=":back", comment="Previous window (Group list window)"),
Step(user_input=":1", comment="group02 hosts detail window"),
Step(user_input=":0", comment="host0201 detail window"),
Step(user_input=":back", comment="Previous window (group02 hosts detail window)"),
Step(user_input=":back", comment="Previous window (Group list window)"),
Step(user_input=":2", comment="group03 hosts detail window"),
Step(user_input=":0", comment="host0301 detail window"),
Step(user_input=":back", comment="Previous window (group03 hosts detail window)"),
Step(user_input=":back", comment="Previous window (Group list window)"),
Step(user_input=":back", comment="Previous window (Browse hosts/ungrouped window)"),
Step(user_input=":back", comment="Previous window (top window)"),
Step(user_input=":1", comment="Inventory hostname window"),
Step(user_input=":0", comment="host0101 detail window"),
Step(user_input=":back", comment="Previous window after host0101 (Inventory hostname window)"),
Step(user_input=":1", comment="host0201 detail window"),
Step(user_input=":back", comment="Previous window after host0201 (Inventory hostname window)"),
Step(user_input=":2", comment="host0301 detail window"),
)
class BaseClass:
"""base class for inventory interactive/stdout tests"""
UPDATE_FIXTURES = False
@staticmethod
@pytest.fixture(scope="module", name="tmux_session")
def fixture_tmux_session(request):
"""tmux fixture for this module"""
params = {
"setup_commands": [
"export ANSIBLE_DEVEL_WARNING=False",
"export ANSIBLE_DEPRECATION_WARNINGS=False",
],
"pane_height": "2000",
"pane_width": "500",
"config_path": TEST_CONFIG_FILE,
"unique_test_id": request.node.nodeid,
}
with TmuxSession(**params) as tmux_session:
yield tmux_session
def test(self, request, tmux_session, step):
"""Run the tests for inventory, mode and ``ee`` set in child class."""
assert os.path.exists(ANSIBLE_INVENTORY_FIXTURE_DIR)
assert os.path.exists(TEST_CONFIG_FILE)
if step.search_within_response is SearchFor.HELP:
search_within_response = ":help help"
elif step.search_within_response is SearchFor.PROMPT:
search_within_response = tmux_session.cli_prompt
else:
raise ValueError("test mode not set")
received_output = tmux_session.interaction(
value=step.user_input,
search_within_response=search_within_response,
)
if step.mask:
# mask out some configuration that is subject to change each run
mask = "X" * 50
for idx, line in enumerate(received_output):
if tmux_session.cli_prompt in line:
received_output[idx] = mask
fixtures_update_requested = (
self.UPDATE_FIXTURES
or os.environ.get("ANSIBLE_NAVIGATOR_UPDATE_TEST_FIXTURES") == "true"
and not any((step.look_fors, step.look_nots))
)
if fixtures_update_requested:
update_fixtures(
request,
step.step_index,
received_output,
step.comment,
additional_information={
"look_fors": step.look_fors,
"look_nots": step.look_nots,
"compared_fixture": not any((step.look_fors, step.look_nots)),
},
)
page = " ".join(received_output)
if step.look_fors:
assert all(look_for in page for look_for in step.look_fors)
if step.look_nots:
assert not any(look_not in page for look_not in step.look_nots)
if not any((step.look_fors, step.look_nots)):
dir_path, file_name = fixture_path_from_request(request, step.step_index)
with open(file=os.path.join(dir_path, file_name), encoding="utf-8") as infile:
expected_output = json.load(infile)["output"]
assert expected_output == received_output, "\n" + "\n".join(
difflib.unified_diff(expected_output, received_output, "expected", "received"),
)
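# A hypothetical child class showing how ``base_steps`` feeds the base
# test; the parametrize wiring below is an illustrative assumption and
# is not taken from the original test modules.
@pytest.mark.parametrize("step", base_steps)
class TestInventoryInteractive(BaseClass):
    """Run the shared inventory steps in interactive mode."""

    UPDATE_FIXTURES = False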
| 41.76378
| 100
| 0.651584
|
import difflib
import json
import os
import pytest
from ....defaults import FIXTURES_DIR
from ..._common import fixture_path_from_request
from ..._common import update_fixtures
from ..._interactions import SearchFor
from ..._interactions import Step
from ..._tmux_session import TmuxSession
TEST_FIXTURE_DIR = os.path.join(FIXTURES_DIR, "integration", "actions", "inventory")
ANSIBLE_INVENTORY_FIXTURE_DIR = os.path.join(TEST_FIXTURE_DIR, "ansible_inventory", "inventory.yml")
TEST_CONFIG_FILE = os.path.join(TEST_FIXTURE_DIR, "ansible-navigator.yml")
base_steps = (
Step(user_input=":0", comment="Browse hosts/ungrouped window"),
Step(user_input=":0", comment="Group list window"),
Step(user_input=":0", comment="group01 hosts detail window"),
Step(user_input=":0", comment="host0101 detail window"),
Step(user_input=":back", comment="Previous window (group01 hosts detail window)"),
Step(user_input=":back", comment="Previous window (Group list window)"),
Step(user_input=":1", comment="group02 hosts detail window"),
Step(user_input=":0", comment="host0201 detail window"),
Step(user_input=":back", comment="Previous window (group02 hosts detail window)"),
Step(user_input=":back", comment="Previous window (Group list window)"),
Step(user_input=":2", comment="group03 hosts detail window"),
Step(user_input=":0", comment="host0301 detail window"),
Step(user_input=":back", comment="Previous window (group03 hosts detail window)"),
Step(user_input=":back", comment="Previous window (Group list window)"),
Step(user_input=":back", comment="Previous window (Browse hosts/ungrouped window)"),
Step(user_input=":back", comment="Previous window (top window)"),
Step(user_input=":1", comment="Inventory hostname window"),
Step(user_input=":0", comment="host0101 detail window"),
Step(user_input=":back", comment="Previous window after host0101 (Inventory hostname window)"),
Step(user_input=":1", comment="host0201 detail window"),
Step(user_input=":back", comment="Previous window after host0201 (Inventory hostname window)"),
Step(user_input=":2", comment="host0301 detail window"),
)
class BaseClass:
UPDATE_FIXTURES = False
@staticmethod
@pytest.fixture(scope="module", name="tmux_session")
def fixture_tmux_session(request):
params = {
"setup_commands": [
"export ANSIBLE_DEVEL_WARNING=False",
"export ANSIBLE_DEPRECATION_WARNINGS=False",
],
"pane_height": "2000",
"pane_width": "500",
"config_path": TEST_CONFIG_FILE,
"unique_test_id": request.node.nodeid,
}
with TmuxSession(**params) as tmux_session:
yield tmux_session
def test(self, request, tmux_session, step):
assert os.path.exists(ANSIBLE_INVENTORY_FIXTURE_DIR)
assert os.path.exists(TEST_CONFIG_FILE)
if step.search_within_response is SearchFor.HELP:
search_within_response = ":help help"
elif step.search_within_response is SearchFor.PROMPT:
search_within_response = tmux_session.cli_prompt
else:
raise ValueError("test mode not set")
received_output = tmux_session.interaction(
value=step.user_input,
search_within_response=search_within_response,
)
if step.mask:
mask = "X" * 50
for idx, line in enumerate(received_output):
if tmux_session.cli_prompt in line:
received_output[idx] = mask
fixtures_update_requested = (
self.UPDATE_FIXTURES
or os.environ.get("ANSIBLE_NAVIGATOR_UPDATE_TEST_FIXTURES") == "true"
and not any((step.look_fors, step.look_nots))
)
if fixtures_update_requested:
update_fixtures(
request,
step.step_index,
received_output,
step.comment,
additional_information={
"look_fors": step.look_fors,
"look_nots": step.look_nots,
"compared_fixture": not any((step.look_fors, step.look_nots)),
},
)
page = " ".join(received_output)
if step.look_fors:
assert all(look_for in page for look_for in step.look_fors)
if step.look_nots:
assert not any(look_not in page for look_not in step.look_nots)
if not any((step.look_fors, step.look_nots)):
dir_path, file_name = fixture_path_from_request(request, step.step_index)
with open(file=os.path.join(dir_path, file_name), encoding="utf-8") as infile:
expected_output = json.load(infile)["output"]
assert expected_output == received_output, "\n" + "\n".join(
difflib.unified_diff(expected_output, received_output, "expected", "received"),
)
| true
| true
|
f7017f4f4ad299560711cdd1fb4c0b007148e3da
| 5,747
|
py
|
Python
|
salt/states/libcloud_loadbalancer.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 5
|
2017-02-07T05:39:29.000Z
|
2020-06-13T02:07:33.000Z
|
salt/states/libcloud_loadbalancer.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
salt/states/libcloud_loadbalancer.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 11
|
2017-01-26T19:36:29.000Z
|
2021-12-11T07:54:16.000Z
|
# -*- coding: utf-8 -*-
'''
Apache Libcloud Load Balancer State
===================================
Manage load balancers using libcloud
:codeauthor: ``Anthony Shaw <[email protected]>``
Apache Libcloud load balancer management for a full list
of supported clouds, see http://libcloud.readthedocs.io/en/latest/loadbalancer/supported_providers.html
Clouds include Amazon ELB, ALB, Google, Aliyun, CloudStack, Softlayer
.. versionadded:: 2018.3.0
:configuration:
This module uses a configuration profile for one or multiple Cloud providers
.. code-block:: yaml
libcloud_loadbalancer:
profile_test1:
driver: gce
key: GOOG0123456789ABCXYZ
secret: mysecret
profile_test2:
driver: alb
key: 12345
secret: mysecret
Example:
Using States to deploy a load balancer with extended arguments to specify region
.. code-block:: yaml
lb_test:
libcloud_loadbalancer.balancer_present:
- name: example
- port: 80
- protocol: http
- profile: google
- ex_region: us-east1
:depends: apache-libcloud
'''
# Import Python Libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import salt libs
import salt.utils.compat
log = logging.getLogger(__name__)
def __virtual__():
return True
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
def state_result(result, message, name, changes=None):
if changes is None:
changes = {}
return {'result': result,
'comment': message,
'name': name,
'changes': changes}
def balancer_present(name, port, protocol, profile, algorithm=None, members=None, **libcloud_kwargs):
'''
Ensures a load balancer is present.
:param name: Load Balancer name
:type name: ``str``
    :param port: Port the load balancer should listen on, e.g. 80
    :type port: ``str``
    :param protocol: Load balancer protocol, e.g. http.
    :type protocol: ``str``
:param profile: The profile key
:type profile: ``str``
:param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN. See Algorithm type
in Libcloud documentation for a full listing.
:type algorithm: ``str``
:param members: An optional list of members to create on deployment
:type members: ``list`` of ``dict`` (ip, port)
'''
balancers = __salt__['libcloud_loadbalancer.list_balancers'](profile)
match = [z for z in balancers if z['name'] == name]
if len(match) > 0:
return state_result(True, "Balancer already exists", name)
else:
starting_members = None
if members is not None:
starting_members = []
for m in members:
starting_members.append({'ip': m['ip'], 'port': m['port']})
balancer = __salt__['libcloud_loadbalancer.create_balancer'](
name, port, protocol,
profile, algorithm=algorithm,
members=starting_members,
**libcloud_kwargs)
return state_result(True, "Created new load balancer", name, balancer)
def balancer_absent(name, profile, **libcloud_kwargs):
'''
Ensures a load balancer is absent.
:param name: Load Balancer name
:type name: ``str``
:param profile: The profile key
:type profile: ``str``
'''
balancers = __salt__['libcloud_loadbalancer.list_balancers'](profile)
match = [z for z in balancers if z['name'] == name]
if len(match) == 0:
return state_result(True, "Balancer already absent", name)
else:
result = __salt__['libcloud_loadbalancer.destroy_balancer'](match[0]['id'], profile, **libcloud_kwargs)
return state_result(result, "Deleted load balancer", name)
def member_present(ip, port, balancer_id, profile, **libcloud_kwargs):
'''
Ensure a load balancer member is present
:param ip: IP address for the new member
:type ip: ``str``
:param port: Port for the new member
:type port: ``int``
:param balancer_id: id of a load balancer you want to attach the member to
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
'''
existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile)
for member in existing_members:
if member['ip'] == ip and member['port'] == port:
return state_result(True, "Member already present", balancer_id)
member = __salt__['libcloud_loadbalancer.balancer_attach_member'](balancer_id, ip, port, profile, **libcloud_kwargs)
return state_result(True, "Member added to balancer, id: {0}".format(member['id']), balancer_id, member)
def member_absent(ip, port, balancer_id, profile, **libcloud_kwargs):
'''
Ensure a load balancer member is absent, based on IP and Port
:param ip: IP address for the member
:type ip: ``str``
:param port: Port for the member
:type port: ``int``
:param balancer_id: id of a load balancer you want to detach the member from
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
'''
existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile)
for member in existing_members:
if member['ip'] == ip and member['port'] == port:
result = __salt__['libcloud_loadbalancer.balancer_detach_member'](balancer_id, member['id'], profile, **libcloud_kwargs)
return state_result(result, "Member removed", balancer_id)
return state_result(True, "Member already absent", balancer_id)
| 31.576923
| 132
| 0.65669
|
from __future__ import absolute_import, unicode_literals, print_function
import logging
import salt.utils.compat
log = logging.getLogger(__name__)
def __virtual__():
return True
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
def state_result(result, message, name, changes=None):
if changes is None:
changes = {}
return {'result': result,
'comment': message,
'name': name,
'changes': changes}
def balancer_present(name, port, protocol, profile, algorithm=None, members=None, **libcloud_kwargs):
balancers = __salt__['libcloud_loadbalancer.list_balancers'](profile)
match = [z for z in balancers if z['name'] == name]
if len(match) > 0:
return state_result(True, "Balancer already exists", name)
else:
starting_members = None
if members is not None:
starting_members = []
for m in members:
starting_members.append({'ip': m['ip'], 'port': m['port']})
balancer = __salt__['libcloud_loadbalancer.create_balancer'](
name, port, protocol,
profile, algorithm=algorithm,
members=starting_members,
**libcloud_kwargs)
return state_result(True, "Created new load balancer", name, balancer)
def balancer_absent(name, profile, **libcloud_kwargs):
balancers = __salt__['libcloud_loadbalancer.list_balancers'](profile)
match = [z for z in balancers if z['name'] == name]
if len(match) == 0:
return state_result(True, "Balancer already absent", name)
else:
result = __salt__['libcloud_loadbalancer.destroy_balancer'](match[0]['id'], profile, **libcloud_kwargs)
return state_result(result, "Deleted load balancer", name)
def member_present(ip, port, balancer_id, profile, **libcloud_kwargs):
existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile)
for member in existing_members:
if member['ip'] == ip and member['port'] == port:
return state_result(True, "Member already present", balancer_id)
member = __salt__['libcloud_loadbalancer.balancer_attach_member'](balancer_id, ip, port, profile, **libcloud_kwargs)
return state_result(True, "Member added to balancer, id: {0}".format(member['id']), balancer_id, member)
def member_absent(ip, port, balancer_id, profile, **libcloud_kwargs):
existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile)
for member in existing_members:
if member['ip'] == ip and member['port'] == port:
result = __salt__['libcloud_loadbalancer.balancer_detach_member'](balancer_id, member['id'], profile, **libcloud_kwargs)
return state_result(result, "Member removed", balancer_id)
return state_result(True, "Member already absent", balancer_id)
| true
| true
|
f7017fa3cd00892a2d9a04db6d620ac61486f985
| 7,245
|
py
|
Python
|
silex_client/utils/parameter_types.py
|
ArtFXDev/silex_client
|
657d594dcfec79e7c8f4053df9d4a5dbc0c9ac50
|
[
"MIT"
] | 10
|
2021-09-21T03:26:45.000Z
|
2022-03-19T00:30:03.000Z
|
silex_client/utils/parameter_types.py
|
ArtFXDev/silex_dcc
|
657d594dcfec79e7c8f4053df9d4a5dbc0c9ac50
|
[
"MIT"
] | 66
|
2021-09-17T09:54:23.000Z
|
2022-03-29T23:31:17.000Z
|
silex_client/utils/parameter_types.py
|
ArtFXDev/silex_dcc
|
657d594dcfec79e7c8f4053df9d4a5dbc0c9ac50
|
[
"MIT"
] | null | null | null |
import pathlib
from silex_client.utils.log import logger
class AnyParameter(object):
def __new__(cls, value):
return value
class CommandParameterMeta(type):
def __new__(cls, name: str, bases: tuple, dct: dict):
def serialize():
return {
"name": "parameter",
}
attributes = {
"serialize": serialize,
}
attributes.update(dct)
return super().__new__(cls, name, bases, attributes)
def get_default(self):
return None
def serialize(self):
return None
class TaskParameterMeta(CommandParameterMeta):
def __init__(self):
pass
def __new__(cls):
def serialize():
return {
"name": "task",
}
def get_default():
return ""
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "TaskParameter", (str,), attributes)
class IntArrayParameterMeta(CommandParameterMeta):
def __init__(self, size: int):
pass
def __new__(cls, size: int):
def __init__(self, value):
if not isinstance(value, list):
value = [value]
for index, item in enumerate(value):
value[index] = int(item)
self.extend(value)
def serialize():
return {
"name": "int_array",
"size": size,
}
def get_default():
return [0 for i in range(size)]
attributes = {
"__init__": __init__,
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "IntArrayParameter", (list,), attributes)
class RangeParameterMeta(CommandParameterMeta):
def __init__(self, start: int, end: int, increment: int = 1):
pass
def __new__(cls, start: int, end: int, increment: int = 1):
def serialize():
return {
"name": "range",
"start": start,
"end": end,
"increment": increment,
}
def get_default():
return start
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "RangeParameter", (int,), attributes)
class SelectParameterMeta(CommandParameterMeta):
def __init__(self, *list_options, **options):
pass
def __new__(cls, *list_options, **options):
for unnamed_option in list_options:
options[unnamed_option] = unnamed_option
def serialize():
return {"name": "select", "options": options}
def get_default():
return list(options.values())[0] if options else None
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "SelectParameter", (str,), attributes)
class RadioSelectParameterMeta(CommandParameterMeta):
def __init__(self, *list_options, **options):
pass
def __new__(cls, *list_options, **options):
for unnamed_option in list_options:
options[unnamed_option] = unnamed_option
def serialize():
return {"name": "radio_select", "options": options}
def get_default():
return list(options.values())[0] if options else None
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "RadioSelectParameter", (str,), attributes)
class MultipleSelectParameterMeta(CommandParameterMeta):
def __init__(self, *list_options, **options):
pass
def __new__(cls, *list_options, **options):
for unnamed_option in list_options:
options[unnamed_option] = unnamed_option
def serialize():
return {"name": "multiple_select", "options": options}
def get_default():
return [list(options.values())[0]] if options else None
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "SelectParameter", (list,), attributes)
# TODO: Replace this parameter with ListParameterMeta
class ListParameter(list):
def __init__(self, value):
logger.warning(
"Deprecation warning: The parameter type ListParameter is deprecated in favor if ListParameterMeta()"
)
data = value
if not isinstance(value, list):
data = [value]
self.extend(data)
class PathParameterMeta(CommandParameterMeta):
def __init__(self, extensions=None, multiple=False):
pass
def __new__(cls, extensions=None, multiple=False):
if extensions is None:
extensions = ["*"]
def __init_list__(self, value):
if not isinstance(value, list):
value = [value]
for index, item in enumerate(value):
value[index] = pathlib.Path(item)
self.extend(value)
def serialize():
return {
"name": "Path",
"extensions": extensions,
"multiple": multiple,
}
def get_default():
return None
attributes = {
"serialize": serialize,
"get_default": get_default,
}
if multiple:
attributes["__init__"] = __init_list__
return super().__new__(cls, "PathParameter", (list,), attributes)
return super().__new__(
cls, "PathParameter", (type(pathlib.Path()),), attributes
)
class ListParameterMeta(CommandParameterMeta):
def __init__(self, parameter_type):
pass
def __new__(cls, parameter_type):
def __init__(self, value):
if not isinstance(value, list):
value = [value]
for index, item in enumerate(value):
value[index] = parameter_type(item)
self.extend(value)
def serialize():
item_type = None
if isinstance(parameter_type, CommandParameterMeta):
return parameter_type.serialize()
elif isinstance(parameter_type, type):
item_type = {"name": parameter_type.__name__}
return {"name": "list", "itemtype": item_type}
def get_default():
return []
attributes = {
"__init__": __init__,
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "ListParameter", (list,), attributes)
class TextParameterMeta(CommandParameterMeta):
def __init__(self, color=None):
pass
def __new__(cls, color=None):
def serialize():
return {"name": "text", "color": color}
def get_default():
return ""
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "ListParameter", (str,), attributes)
| 26.25
| 113
| 0.557902
|
import pathlib
from silex_client.utils.log import logger
class AnyParameter(object):
def __new__(cls, value):
return value
class CommandParameterMeta(type):
def __new__(cls, name: str, bases: tuple, dct: dict):
def serialize():
return {
"name": "parameter",
}
attributes = {
"serialize": serialize,
}
attributes.update(dct)
return super().__new__(cls, name, bases, attributes)
def get_default(self):
return None
def serialize(self):
return None
class TaskParameterMeta(CommandParameterMeta):
def __init__(self):
pass
def __new__(cls):
def serialize():
return {
"name": "task",
}
def get_default():
return ""
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "TaskParameter", (str,), attributes)
class IntArrayParameterMeta(CommandParameterMeta):
def __init__(self, size: int):
pass
def __new__(cls, size: int):
def __init__(self, value):
if not isinstance(value, list):
value = [value]
for index, item in enumerate(value):
value[index] = int(item)
self.extend(value)
def serialize():
return {
"name": "int_array",
"size": size,
}
def get_default():
return [0 for i in range(size)]
attributes = {
"__init__": __init__,
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "IntArrayParameter", (list,), attributes)
class RangeParameterMeta(CommandParameterMeta):
def __init__(self, start: int, end: int, increment: int = 1):
pass
def __new__(cls, start: int, end: int, increment: int = 1):
def serialize():
return {
"name": "range",
"start": start,
"end": end,
"increment": increment,
}
def get_default():
return start
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "RangeParameter", (int,), attributes)
class SelectParameterMeta(CommandParameterMeta):
def __init__(self, *list_options, **options):
pass
def __new__(cls, *list_options, **options):
for unnamed_option in list_options:
options[unnamed_option] = unnamed_option
def serialize():
return {"name": "select", "options": options}
def get_default():
return list(options.values())[0] if options else None
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "SelectParameter", (str,), attributes)
class RadioSelectParameterMeta(CommandParameterMeta):
def __init__(self, *list_options, **options):
pass
def __new__(cls, *list_options, **options):
for unnamed_option in list_options:
options[unnamed_option] = unnamed_option
def serialize():
return {"name": "radio_select", "options": options}
def get_default():
return list(options.values())[0] if options else None
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "RadioSelectParameter", (str,), attributes)
class MultipleSelectParameterMeta(CommandParameterMeta):
def __init__(self, *list_options, **options):
pass
def __new__(cls, *list_options, **options):
for unnamed_option in list_options:
options[unnamed_option] = unnamed_option
def serialize():
return {"name": "multiple_select", "options": options}
def get_default():
return [list(options.values())[0]] if options else None
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "SelectParameter", (list,), attributes)
class ListParameter(list):
def __init__(self, value):
logger.warning(
"Deprecation warning: The parameter type ListParameter is deprecated in favor if ListParameterMeta()"
)
data = value
if not isinstance(value, list):
data = [value]
self.extend(data)
class PathParameterMeta(CommandParameterMeta):
def __init__(self, extensions=None, multiple=False):
pass
def __new__(cls, extensions=None, multiple=False):
if extensions is None:
extensions = ["*"]
def __init_list__(self, value):
if not isinstance(value, list):
value = [value]
for index, item in enumerate(value):
value[index] = pathlib.Path(item)
self.extend(value)
def serialize():
return {
"name": "Path",
"extensions": extensions,
"multiple": multiple,
}
def get_default():
return None
attributes = {
"serialize": serialize,
"get_default": get_default,
}
if multiple:
attributes["__init__"] = __init_list__
return super().__new__(cls, "PathParameter", (list,), attributes)
return super().__new__(
cls, "PathParameter", (type(pathlib.Path()),), attributes
)
class ListParameterMeta(CommandParameterMeta):
def __init__(self, parameter_type):
pass
def __new__(cls, parameter_type):
def __init__(self, value):
if not isinstance(value, list):
value = [value]
for index, item in enumerate(value):
value[index] = parameter_type(item)
self.extend(value)
def serialize():
item_type = None
if isinstance(parameter_type, CommandParameterMeta):
return parameter_type.serialize()
elif isinstance(parameter_type, type):
item_type = {"name": parameter_type.__name__}
return {"name": "list", "itemtype": item_type}
def get_default():
return []
attributes = {
"__init__": __init__,
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "ListParameter", (list,), attributes)
class TextParameterMeta(CommandParameterMeta):
def __init__(self, color=None):
pass
def __new__(cls, color=None):
def serialize():
return {"name": "text", "color": color}
def get_default():
return ""
attributes = {
"serialize": serialize,
"get_default": get_default,
}
return super().__new__(cls, "ListParameter", (str,), attributes)
| true
| true
|
f701801fce4e0791e2126e82c19b415fb5f428d4
| 761
|
py
|
Python
|
atcoder/abc/abc154_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1
|
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
atcoder/abc/abc154_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
atcoder/abc/abc154_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
# ABC154 E "Almost Everywhere Zero": count the integers in [1, N]
# that contain exactly K non-zero digits, using a digit DP.
N = input()
L = len(N)
K = int(input())
# dp[i][k][tight]: count of length-i digit prefixes with k non-zero
# digits; tight=1 while the prefix still equals N's own prefix.
dp = [[[0] * 2 for _ in range(K + 1)] for _ in range(L + 1)]
dp[0][0][1] = 1
for i, x in zip(range(L), map(int, N)):
    for k in range(K):
        dp[i+1][k][0] += dp[i][k][0]  # d == 0
        if x == 0:
            dp[i+1][k][1] += dp[i][k][1]
        elif x > 0:
            dp[i+1][k][0] += dp[i][k][1]
        # d != 0
        for d in range(1, 10):
            dp[i+1][k+1][0] += dp[i][k][0]
            if d == x:
                dp[i+1][k+1][1] += dp[i][k][1]
            elif d < x:
                dp[i+1][k+1][0] += dp[i][k][1]
    dp[i+1][K][0] += dp[i][K][0]  # k == K and d == 0
    if x == 0:
        dp[i+1][K][1] += dp[i][K][1]
    elif x > 0:
        dp[i+1][K][0] += dp[i][K][1]
# e.g. N=100, K=1 -> 19 (1..9, 10, 20, ..., 90, and 100)
print(sum(dp[-1][K]))
| 29.269231
| 60
| 0.341656
|
N = input()
L = len(N)
K = int(input())
dp = [[[0] * 2 for _ in range(K + 1)] for _ in range(L + 1)]
dp[0][0][1] = 1
for i, x in zip(range(L), map(int, N)):
for k in range(K):
        dp[i+1][k][0] += dp[i][k][0]
        if x == 0:
dp[i+1][k][1] += dp[i][k][1]
elif x > 0:
dp[i+1][k][0] += dp[i][k][1]
for d in range(1, 10):
dp[i+1][k+1][0] += dp[i][k][0]
if d == x:
dp[i+1][k+1][1] += dp[i][k][1]
elif d < x:
dp[i+1][k+1][0] += dp[i][k][1]
    dp[i+1][K][0] += dp[i][K][0]
    if x == 0:
dp[i+1][K][1] += dp[i][K][1]
elif x > 0:
dp[i+1][K][0] += dp[i][K][1]
print(sum(dp[-1][K]))
| true
| true
|
f70180b809d83d1f2352aa742abe4ed55f91163a
| 58
|
py
|
Python
|
config.py
|
marcelom/slask
|
5125d7e74932d5d0151323e935d9586cbc037f8f
|
[
"MIT"
] | 1
|
2015-01-28T06:05:56.000Z
|
2015-01-28T06:05:56.000Z
|
config.py
|
marcelom/slask
|
5125d7e74932d5d0151323e935d9586cbc037f8f
|
[
"MIT"
] | null | null | null |
config.py
|
marcelom/slask
|
5125d7e74932d5d0151323e935d9586cbc037f8f
|
[
"MIT"
] | null | null | null |
config = {
"username": 'slask',
"icon": ":poop:",
}
| 11.6
| 23
| 0.465517
|
config = {
"username": 'slask',
"icon": ":poop:",
}
| true
| true
|
f70180bffd70b17aea765e77dc37db1f7c18cbc5
| 12,388
|
py
|
Python
|
fudge/patcher.py
|
priya1puresoftware/fudge
|
ab5822901cde23618f0f9ab21ff82a077ea7718b
|
[
"MIT"
] | null | null | null |
fudge/patcher.py
|
priya1puresoftware/fudge
|
ab5822901cde23618f0f9ab21ff82a077ea7718b
|
[
"MIT"
] | null | null | null |
fudge/patcher.py
|
priya1puresoftware/fudge
|
ab5822901cde23618f0f9ab21ff82a077ea7718b
|
[
"MIT"
] | null | null | null |
"""Patching utilities for working with fake objects.
See :ref:`using-fudge` for common scenarios.
"""
__all__ = ['patch_object', 'with_patched_object', 'PatchHandler',
'patched_context', 'patch']
import sys
import fudge
from fudge.util import wraps
class patch(object):
"""A test decorator that patches importable names with :class:`fakes <Fake>`
Each fake is exposed as an argument to the test:
.. doctest::
:hide:
>>> import fudge
.. doctest::
>>> @fudge.patch('os.remove')
... def test(fake_remove):
... fake_remove.expects_call()
... # do stuff...
...
>>> test()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations()
Many paths can be patched at once:
.. doctest::
>>> @fudge.patch('os.remove',
... 'shutil.rmtree')
... def test(fake_remove, fake_rmtree):
... fake_remove.is_callable()
... # do stuff...
...
>>> test()
For convenience, the patch method calls
:func:`fudge.clear_calls`, :func:`fudge.verify`, and :func:`fudge.clear_expectations`. For that reason, you must manage all your fake objects within the test function itself.
.. note::
If you are using a unittest class, you cannot declare fakes
within ``setUp()`` unless you manually clear calls and clear
expectations. If you do that, you'll want to use the
:func:`fudge.with_fakes` decorator instead of ``@patch``.
"""
def __init__(self, *obj_paths):
self.obj_paths = obj_paths
def __call__(self, fn):
@wraps(fn)
def caller(*args, **kw):
fakes = self.__enter__()
if not isinstance(fakes, (tuple, list)):
fakes = [fakes]
args += tuple(fakes)
value = None
try:
value = fn(*args, **kw)
except:
etype, val, tb = sys.exc_info()
self.__exit__(etype, val, tb)
raise etype, val, tb
else:
self.__exit__(None, None, None)
return value
return caller
def __enter__(self):
fudge.clear_expectations()
fudge.clear_calls()
self.patches = []
all_fakes = []
for path in self.obj_paths:
try:
target, attr = path.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError(
"Need a valid target to patch. You supplied: %r"
% path)
fake = fudge.Fake(path)
all_fakes.append(fake)
self.patches.append(patch_object(target, attr, fake))
if len(all_fakes) == 1:
return all_fakes[0]
else:
return all_fakes
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if not exc_type:
fudge.verify()
finally:
for p in self.patches:
p.restore()
fudge.clear_expectations()
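# Because ``patch`` implements __enter__/__exit__ above, it also works
# inside a `with statement`; a minimal sketch, with an illustrative
# target path:
#
#     with fudge.patch('os.remove') as fake_remove:
#         fake_remove.is_callable()
#         ...  # exercise code under test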
def with_patched_object(obj, attr_name, patched_value):
"""Decorator that patches an object before the decorated method
is called and restores it afterwards.
This is a wrapper around :func:`fudge.patcher.patch_object`
Example::
>>> from fudge import with_patched_object
>>> class Session:
... state = 'clean'
...
>>> @with_patched_object(Session, "state", "dirty")
... def test():
... print Session.state
...
>>> test()
dirty
>>> print Session.state
clean
"""
def patcher(method):
@wraps(method)
def method_call(*m_args, **m_kw):
patched_obj = patch_object(obj, attr_name, patched_value)
try:
return method(*m_args, **m_kw)
finally:
patched_obj.restore()
return method_call
return patcher
class patched_context(object):
"""A context manager to patch an object temporarily during a `with statement`_ block.
This is a wrapper around :func:`fudge.patcher.patch_object`
.. lame, lame, cannot figure out how to apply __future__ to doctest
so this output is currently skipped
.. doctest:: python25
:options: +SKIP
>>> from fudge import patched_context
>>> class Session:
... state = 'clean'
...
>>> with patched_context(Session, "state", "dirty"): # doctest: +SKIP
... print Session.state
...
dirty
>>> print Session.state
clean
.. _with statement: http://www.python.org/dev/peps/pep-0343/
"""
def __init__(self, obj, attr_name, patched_value):
# note that a @contextmanager decorator would be simpler
# but it can't be used since a value cannot be yielded within a
# try/finally block which is needed to restore the object on finally.
self.patched_object = patch_object(obj, attr_name, patched_value)
def __enter__(self):
return self.patched_object
def __exit__(self, exc_type, exc_val, exc_tb):
self.patched_object.restore()
def patch_object(obj, attr_name, patched_value):
"""Patches an object and returns an instance of :class:`fudge.patcher.PatchHandler` for later restoration.
Note that if *obj* is not an object but a path to a module then it will be imported.
You may want to use a more convenient wrapper :func:`with_patched_object` or :func:`patched_context`
Example::
>>> from fudge import patch_object
>>> class Session:
... state = 'clean'
...
>>> patched_session = patch_object(Session, "state", "dirty")
>>> Session.state
'dirty'
>>> patched_session.restore()
>>> Session.state
'clean'
Here is another example showing how to patch multiple objects at once::
>>> class Session:
... state = 'clean'
...
>>> class config:
... session_strategy = 'database'
...
>>> patches = [
... patch_object(config, "session_strategy", "filesystem"),
... patch_object(Session, "state", "dirty")
... ]
>>> try:
... # your app under test would run here ...
... print "(while patched)"
... print "config.session_strategy=%r" % config.session_strategy
... print "Session.state=%r" % Session.state
... finally:
... for p in patches:
... p.restore()
... print "(patches restored)"
(while patched)
config.session_strategy='filesystem'
Session.state='dirty'
(patches restored)
>>> config.session_strategy
'database'
>>> Session.state
'clean'
"""
if isinstance(obj, (str, unicode)):
obj_path = adjusted_path = obj
done = False
exc = None
at_top_level = False
while not done:
try:
obj = __import__(adjusted_path)
done = True
except ImportError:
                # Handle paths that traverse object attributes.
# Such as: smtplib.SMTP.connect
# smtplib <- module to import
adjusted_path = adjusted_path.rsplit('.', 1)[0]
if not exc:
exc = sys.exc_info()
if at_top_level:
# We're at the top level module and it doesn't exist.
# Raise the first exception since it will make more sense:
etype, val, tb = exc
raise etype, val, tb
if not adjusted_path.count('.'):
at_top_level = True
for part in obj_path.split('.')[1:]:
obj = getattr(obj, part)
handle = PatchHandler(obj, attr_name)
handle.patch(patched_value)
return handle
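# The string branch above lets the target be named by its import path;
# a minimal sketch, using smtplib purely as a familiar example::
#
#     >>> handle = patch_object('smtplib.SMTP', 'connect', fudge.Fake())
#     >>> # ... exercise code that calls SMTP.connect ...
#     >>> handle.restore()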
class NonExistant(object):
"""Represents a non-existant value."""
class PatchHandler(object):
"""Low level patch handler that memorizes a patch so you can restore it later.
You can use more convenient wrappers :func:`with_patched_object` and :func:`patched_context`
"""
def __init__(self, orig_object, attr_name):
self.orig_object = orig_object
self.attr_name = attr_name
self.proxy_object = None
self.orig_value, self.is_local = self._get_original(self.orig_object,
self.attr_name)
self.getter_class, self.getter = self._handle_getter(self.orig_object,
self.attr_name)
def patch(self, patched_value):
"""Set a new value for the attribute of the object."""
try:
if self.getter:
setattr(self.getter_class, self.attr_name, patched_value)
else:
setattr(self.orig_object, self.attr_name, patched_value)
except TypeError:
# Workaround for patching builtin objects:
proxy_name = 'fudge_proxy_%s_%s_%s' % (
self.orig_object.__module__,
self.orig_object.__name__,
patched_value.__class__.__name__
)
self.proxy_object = type(proxy_name, (self.orig_object,),
{self.attr_name: patched_value})
mod = sys.modules[self.orig_object.__module__]
setattr(mod, self.orig_object.__name__, self.proxy_object)
def restore(self):
"""Restore the saved value for the attribute of the object."""
if self.proxy_object is None:
if self.getter:
setattr(self.getter_class, self.attr_name, self.getter)
elif self.is_local:
setattr(self.orig_object, self.attr_name, self.orig_value)
else:
# Was not a local, safe to delete:
delattr(self.orig_object, self.attr_name)
else:
setattr(sys.modules[self.orig_object.__module__],
self.orig_object.__name__,
self.orig_object)
def _find_class_for_attr(self, cls, attr):
if attr in cls.__dict__:
return cls
else:
for base in cls.__bases__:
if self._find_class_for_attr(base, attr) is not NonExistant:
return base
return NonExistant
def _get_original(self, orig_object, name):
try:
value = orig_object.__dict__[name]
is_local = True
except (AttributeError, KeyError):
value = getattr(orig_object, name, NonExistant)
is_local = False
if value is NonExistant:
raise AttributeError(
"%s does not have the attribute %r" % (orig_object, name))
return value, is_local
def _get_exact_original(self, orig_object, name):
if hasattr(orig_object, '__dict__'):
if name not in orig_object.__dict__:
# TODO: handle class objects, not just instance objects?
# This is only here for Class.property.__get__
if hasattr(orig_object, '__class__'):
cls = orig_object.__class__
orig_object = self._find_class_for_attr(cls, name)
return orig_object
def _handle_getter(self, orig_object, name):
getter_class, getter = None, None
exact_orig = self._get_exact_original(orig_object, name)
try:
ob = exact_orig.__dict__[name]
except (AttributeError, KeyError):
pass
else:
if hasattr(ob, '__get__'):
getter_class = exact_orig
getter = ob
return getter_class, getter
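# Note: the descriptor handling above ensures that when the patched
# attribute is a property (or any object defining __get__), the
# original getter is restored on the class that actually defines it,
# rather than on the instance or on a subclass.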
| 33.663043
| 179
| 0.544479
|
"""Patching utilities for working with fake objects.
See :ref:`using-fudge` for common scenarios.
"""
__all__ = ['patch_object', 'with_patched_object', 'PatchHandler',
'patched_context', 'patch']
import sys
import fudge
from fudge.util import wraps
class patch(object):
"""A test decorator that patches importable names with :class:`fakes <Fake>`
Each fake is exposed as an argument to the test:
.. doctest::
:hide:
>>> import fudge
.. doctest::
>>> @fudge.patch('os.remove')
... def test(fake_remove):
... fake_remove.expects_call()
... # do stuff...
...
>>> test()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations()
Many paths can be patched at once:
.. doctest::
>>> @fudge.patch('os.remove',
... 'shutil.rmtree')
... def test(fake_remove, fake_rmtree):
... fake_remove.is_callable()
... # do stuff...
...
>>> test()
For convenience, the patch method calls
:func:`fudge.clear_calls`, :func:`fudge.verify`, and :func:`fudge.clear_expectations`. For that reason, you must manage all your fake objects within the test function itself.
.. note::
If you are using a unittest class, you cannot declare fakes
within ``setUp()`` unless you manually clear calls and clear
expectations. If you do that, you'll want to use the
:func:`fudge.with_fakes` decorator instead of ``@patch``.
"""
def __init__(self, *obj_paths):
self.obj_paths = obj_paths
def __call__(self, fn):
@wraps(fn)
def caller(*args, **kw):
fakes = self.__enter__()
if not isinstance(fakes, (tuple, list)):
fakes = [fakes]
args += tuple(fakes)
value = None
try:
value = fn(*args, **kw)
except:
etype, val, tb = sys.exc_info()
self.__exit__(etype, val, tb)
raise etype, val, tb
else:
self.__exit__(None, None, None)
return value
return caller
def __enter__(self):
fudge.clear_expectations()
fudge.clear_calls()
self.patches = []
all_fakes = []
for path in self.obj_paths:
try:
target, attr = path.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError(
"Need a valid target to patch. You supplied: %r"
% path)
fake = fudge.Fake(path)
all_fakes.append(fake)
self.patches.append(patch_object(target, attr, fake))
if len(all_fakes) == 1:
return all_fakes[0]
else:
return all_fakes
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if not exc_type:
fudge.verify()
finally:
for p in self.patches:
p.restore()
fudge.clear_expectations()
def with_patched_object(obj, attr_name, patched_value):
"""Decorator that patches an object before the decorated method
is called and restores it afterwards.
This is a wrapper around :func:`fudge.patcher.patch_object`
Example::
>>> from fudge import with_patched_object
>>> class Session:
... state = 'clean'
...
>>> @with_patched_object(Session, "state", "dirty")
... def test():
... print Session.state
...
>>> test()
dirty
>>> print Session.state
clean
"""
def patcher(method):
@wraps(method)
def method_call(*m_args, **m_kw):
patched_obj = patch_object(obj, attr_name, patched_value)
try:
return method(*m_args, **m_kw)
finally:
patched_obj.restore()
return method_call
return patcher
class patched_context(object):
"""A context manager to patch an object temporarily during a `with statement`_ block.
This is a wrapper around :func:`fudge.patcher.patch_object`
.. lame, lame, cannot figure out how to apply __future__ to doctest
so this output is currently skipped
.. doctest:: python25
:options: +SKIP
>>> from fudge import patched_context
>>> class Session:
... state = 'clean'
...
>>> with patched_context(Session, "state", "dirty"): # doctest: +SKIP
... print Session.state
...
dirty
>>> print Session.state
clean
.. _with statement: http://www.python.org/dev/peps/pep-0343/
"""
def __init__(self, obj, attr_name, patched_value):
# note that a @contextmanager decorator would be simpler
# but it can't be used since a value cannot be yielded within a
self.patched_object = patch_object(obj, attr_name, patched_value)
def __enter__(self):
return self.patched_object
def __exit__(self, exc_type, exc_val, exc_tb):
self.patched_object.restore()
def patch_object(obj, attr_name, patched_value):
"""Patches an object and returns an instance of :class:`fudge.patcher.PatchHandler` for later restoration.
Note that if *obj* is not an object but a path to a module then it will be imported.
You may want to use a more convenient wrapper :func:`with_patched_object` or :func:`patched_context`
Example::
>>> from fudge import patch_object
>>> class Session:
... state = 'clean'
...
>>> patched_session = patch_object(Session, "state", "dirty")
>>> Session.state
'dirty'
>>> patched_session.restore()
>>> Session.state
'clean'
Here is another example showing how to patch multiple objects at once::
>>> class Session:
... state = 'clean'
...
>>> class config:
... session_strategy = 'database'
...
>>> patches = [
... patch_object(config, "session_strategy", "filesystem"),
... patch_object(Session, "state", "dirty")
... ]
>>> try:
... # your app under test would run here ...
... print "(while patched)"
... print "config.session_strategy=%r" % config.session_strategy
... print "Session.state=%r" % Session.state
... finally:
... for p in patches:
... p.restore()
... print "(patches restored)"
(while patched)
config.session_strategy='filesystem'
Session.state='dirty'
(patches restored)
>>> config.session_strategy
'database'
>>> Session.state
'clean'
"""
if isinstance(obj, (str, unicode)):
obj_path = adjusted_path = obj
done = False
exc = None
at_top_level = False
while not done:
try:
obj = __import__(adjusted_path)
done = True
except ImportError:
adjusted_path = adjusted_path.rsplit('.', 1)[0]
if not exc:
exc = sys.exc_info()
if at_top_level:
etype, val, tb = exc
raise etype, val, tb
if not adjusted_path.count('.'):
at_top_level = True
for part in obj_path.split('.')[1:]:
obj = getattr(obj, part)
handle = PatchHandler(obj, attr_name)
handle.patch(patched_value)
return handle
class NonExistant(object):
"""Represents a non-existant value."""
class PatchHandler(object):
"""Low level patch handler that memorizes a patch so you can restore it later.
You can use more convenient wrappers :func:`with_patched_object` and :func:`patched_context`
"""
def __init__(self, orig_object, attr_name):
self.orig_object = orig_object
self.attr_name = attr_name
self.proxy_object = None
self.orig_value, self.is_local = self._get_original(self.orig_object,
self.attr_name)
self.getter_class, self.getter = self._handle_getter(self.orig_object,
self.attr_name)
def patch(self, patched_value):
"""Set a new value for the attribute of the object."""
try:
if self.getter:
setattr(self.getter_class, self.attr_name, patched_value)
else:
setattr(self.orig_object, self.attr_name, patched_value)
except TypeError:
proxy_name = 'fudge_proxy_%s_%s_%s' % (
self.orig_object.__module__,
self.orig_object.__name__,
patched_value.__class__.__name__
)
self.proxy_object = type(proxy_name, (self.orig_object,),
{self.attr_name: patched_value})
mod = sys.modules[self.orig_object.__module__]
setattr(mod, self.orig_object.__name__, self.proxy_object)
def restore(self):
"""Restore the saved value for the attribute of the object."""
if self.proxy_object is None:
if self.getter:
setattr(self.getter_class, self.attr_name, self.getter)
elif self.is_local:
setattr(self.orig_object, self.attr_name, self.orig_value)
else:
delattr(self.orig_object, self.attr_name)
else:
setattr(sys.modules[self.orig_object.__module__],
self.orig_object.__name__,
self.orig_object)
def _find_class_for_attr(self, cls, attr):
if attr in cls.__dict__:
return cls
else:
for base in cls.__bases__:
if self._find_class_for_attr(base, attr) is not NonExistant:
return base
return NonExistant
def _get_original(self, orig_object, name):
try:
value = orig_object.__dict__[name]
is_local = True
except (AttributeError, KeyError):
value = getattr(orig_object, name, NonExistant)
is_local = False
if value is NonExistant:
raise AttributeError(
"%s does not have the attribute %r" % (orig_object, name))
return value, is_local
def _get_exact_original(self, orig_object, name):
if hasattr(orig_object, '__dict__'):
if name not in orig_object.__dict__:
if hasattr(orig_object, '__class__'):
cls = orig_object.__class__
orig_object = self._find_class_for_attr(cls, name)
return orig_object
def _handle_getter(self, orig_object, name):
getter_class, getter = None, None
exact_orig = self._get_exact_original(orig_object, name)
try:
ob = exact_orig.__dict__[name]
except (AttributeError, KeyError):
pass
else:
if hasattr(ob, '__get__'):
getter_class = exact_orig
getter = ob
return getter_class, getter
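A minimal usage sketch (not part of the docstrings above) of the descriptor handling that _handle_getter enables: patching an attribute that is really a property on a base class still restores the original descriptor on restore(). All names below are illustrative.
from fudge import patch_object

class Base(object):
    @property
    def state(self):
        return "clean"

class Session(Base):
    pass

handle = patch_object(Session, "state", "dirty")  # setattr happens on Base, the getter's class
print(Session().state)   # 'dirty'
handle.restore()         # the original property descriptor is reinstalled
print(Session().state)   # 'clean'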
| false
| true
|
f7018100d9e5b0dbbe8bcdafc3c55ae95bd3df34
| 5,553
|
py
|
Python
|
datasets/vqa_v2.py
|
TopCoder2K/mdetr
|
aedfd63f550ae36d1477484c489a2aa438d10aa3
|
[
"Apache-2.0"
] | 2
|
2022-02-22T05:11:00.000Z
|
2022-03-30T18:59:50.000Z
|
datasets/vqa_v2.py
|
TopCoder2K/mdetr
|
aedfd63f550ae36d1477484c489a2aa438d10aa3
|
[
"Apache-2.0"
] | null | null | null |
datasets/vqa_v2.py
|
TopCoder2K/mdetr
|
aedfd63f550ae36d1477484c489a2aa438d10aa3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/ashkamath/mdetr/blob/main/datasets/gqa.py
"""
import json
from pathlib import Path
import torch
import torchvision
from transformers import RobertaTokenizerFast
from .coco import ConvertCocoPolysToMask, ModulatedDetection, make_coco_transforms
class VQAv2Detection(ModulatedDetection):
pass
class VQAv2QuestionAnswering(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks, return_tokens, tokenizer, ann_folder):
super(VQAv2QuestionAnswering, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks, return_tokens, tokenizer=tokenizer)
with open(ann_folder / "vqa2_answer2id.json", "r") as f:
self.answer2id = json.load(f)
with open(ann_folder / "vqa2_answer2id_by_type.json", "r") as f:
self.answer2id_by_type = json.load(f)
self.type2id = {"yes/no": 0, "number": 1, "other": 2}
def __getitem__(self, idx):
img, target = super(VQAv2QuestionAnswering, self).__getitem__(idx)
image_id = self.ids[idx]
coco_img = self.coco.loadImgs(image_id)[0]
caption = coco_img["caption"]
dataset_name = coco_img["dataset_name"]
questionId = coco_img["questionId"]
target = {"image_id": image_id, "annotations": target, "caption": caption}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
target["dataset_name"] = dataset_name
target["questionId"] = questionId
if coco_img["answer"] not in self.answer2id:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long)
target["answer_type"] = torch.as_tensor(self.type2id[coco_img["answer_type"]], dtype=torch.long)
        # util.misc.collate_fn requires 'answer' to come before every other answer type in target
if coco_img["answer"] not in self.answer2id_by_type["yes/no"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_yes/no"] = torch.as_tensor(
self.answer2id_by_type["yes/no"][answer] if coco_img["answer_type"] == "yes/no" else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["number"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_number"] = torch.as_tensor(
self.answer2id_by_type["number"][answer] if coco_img["answer_type"] == "number" else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["other"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_other"] = torch.as_tensor(
self.answer2id_by_type["other"][answer] if coco_img["answer_type"] == "other" else -100,
dtype=torch.long,
)
return img, target
def build(image_set, args):
# TODO: img or all?
img_dir = Path(args.coco_img_path)
assert img_dir.exists(), f"provided COCO img path {img_dir} does not exist"
tokenizer = RobertaTokenizerFast.from_pretrained(args.text_encoder_type)
if args.do_qa:
        # Not needed for vqa2:
# assert args.vqa2_split_type is not None
if image_set == "train":
datasets = []
for imset in ["train", "minival"]:
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_{imset}.json"
datasets.append(
VQAv2QuestionAnswering(
img_dir / "train2014" if imset == "train" else img_dir / "val2014",
ann_file,
transforms=make_coco_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
)
return torch.utils.data.ConcatDataset(datasets)
elif image_set == "val":
            # TODO: is this the right ann_file?
            ann_file = Path(args.vqa2_ann_path) / "finetune_vqa2_minival.json"
return VQAv2QuestionAnswering(
img_dir / "val2014",
ann_file,
transforms=make_coco_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
elif image_set in ["test", "testdev", "trainval"]:
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_{image_set}.json"
return VQAv2QuestionAnswering(
img_dir / "test2015",
ann_file,
transforms=make_coco_transforms("val", cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
else:
assert False, f"Unknown image set {image_set}"
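A hedged smoke-test sketch for build() above: the attribute names are exactly the ones the function reads, while the paths and flag values are placeholders.
from types import SimpleNamespace

args = SimpleNamespace(
    coco_img_path="/data/coco",              # must contain a val2014/ folder for this split
    vqa2_ann_path="/data/vqa2_annotations",  # must contain finetune_vqa2_minival.json
    text_encoder_type="roberta-base",
    do_qa=True,
    masks=False,
)
dataset = build("val", args)
img, target = dataset[0]  # target carries the answer/answer_type tensors built above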
| 39.664286
| 109
| 0.60886
|
import json
from pathlib import Path
import torch
import torchvision
from transformers import RobertaTokenizerFast
from .coco import ConvertCocoPolysToMask, ModulatedDetection, make_coco_transforms
class VQAv2Detection(ModulatedDetection):
pass
class VQAv2QuestionAnswering(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks, return_tokens, tokenizer, ann_folder):
super(VQAv2QuestionAnswering, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks, return_tokens, tokenizer=tokenizer)
with open(ann_folder / "vqa2_answer2id.json", "r") as f:
self.answer2id = json.load(f)
with open(ann_folder / "vqa2_answer2id_by_type.json", "r") as f:
self.answer2id_by_type = json.load(f)
self.type2id = {"yes/no": 0, "number": 1, "other": 2}
def __getitem__(self, idx):
img, target = super(VQAv2QuestionAnswering, self).__getitem__(idx)
image_id = self.ids[idx]
coco_img = self.coco.loadImgs(image_id)[0]
caption = coco_img["caption"]
dataset_name = coco_img["dataset_name"]
questionId = coco_img["questionId"]
target = {"image_id": image_id, "annotations": target, "caption": caption}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
target["dataset_name"] = dataset_name
target["questionId"] = questionId
if coco_img["answer"] not in self.answer2id:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long)
target["answer_type"] = torch.as_tensor(self.type2id[coco_img["answer_type"]], dtype=torch.long)
if coco_img["answer"] not in self.answer2id_by_type["yes/no"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_yes/no"] = torch.as_tensor(
self.answer2id_by_type["yes/no"][answer] if coco_img["answer_type"] == "yes/no" else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["number"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_number"] = torch.as_tensor(
self.answer2id_by_type["number"][answer] if coco_img["answer_type"] == "number" else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["other"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_other"] = torch.as_tensor(
self.answer2id_by_type["other"][answer] if coco_img["answer_type"] == "other" else -100,
dtype=torch.long,
)
return img, target
def build(image_set, args):
img_dir = Path(args.coco_img_path)
assert img_dir.exists(), f"provided COCO img path {img_dir} does not exist"
tokenizer = RobertaTokenizerFast.from_pretrained(args.text_encoder_type)
if args.do_qa:
if image_set == "train":
datasets = []
for imset in ["train", "minival"]:
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_{imset}.json"
datasets.append(
VQAv2QuestionAnswering(
img_dir / "train2014" if imset == "train" else img_dir / "val2014",
ann_file,
transforms=make_coco_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
)
return torch.utils.data.ConcatDataset(datasets)
elif image_set == "val":
            ann_file = Path(args.vqa2_ann_path) / "finetune_vqa2_minival.json"
return VQAv2QuestionAnswering(
img_dir / "val2014",
ann_file,
transforms=make_coco_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
elif image_set in ["test", "testdev", "trainval"]:
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_{image_set}.json"
return VQAv2QuestionAnswering(
img_dir / "test2015",
ann_file,
transforms=make_coco_transforms("val", cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
else:
assert False, f"Unknown image set {image_set}"
| true
| true
|
f701820b9d872f8eac52d86e40473c2034a80a09
| 2,240
|
py
|
Python
|
tests/commands/test_admin.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | 1
|
2022-02-21T18:55:34.000Z
|
2022-02-21T18:55:34.000Z
|
tests/commands/test_admin.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | null | null | null |
tests/commands/test_admin.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | null | null | null |
from assertpy import assert_that
from httmock import HTTMock
from sahyun_bot.commands.admin import Index, Rank
from sahyun_bot.users_settings import UserRank
from tests.mock_customsforge import customsforge
def test_require_admin(commander, hook):
for command in ['!lock', '!index', '!rank']:
with commander.executest(hook, command, 'goodlikebot'):
hook.assert_silent_failure()
def test_lock_unlock(commander, hook):
with commander.executest(hook, '!lock'):
hook.assert_success('Bot is now in ADMIN only mode')
# even basic commands are unauthorized
with commander.executest(hook, '!time', 'goodlikebot'):
hook.assert_silent_failure()
with commander.executest(hook, '!lock'):
hook.assert_success('Bot no longer in ADMIN only mode')
# functionality restored
with commander.executest(hook, '!time', 'goodlikebot'):
hook.assert_success()
def test_index(tl, hook):
with HTTMock(customsforge), Index(tl=tl).executest(hook):
hook.assert_success('CDLCs indexed')
tl.set_use_elastic(False)
with HTTMock(customsforge), Index(tl=tl).executest(hook):
hook.assert_failure('CDLCs could not be indexed')
def test_rank(users, hook):
with Rank(us=users).executest(hook, args=''):
hook.assert_failure('Try !rank RANK NICK')
with Rank(us=users).executest(hook, args='just_rank'):
hook.assert_failure('Try !rank RANK NICK')
with Rank(us=users).executest(hook, args='BAD_RANK goodlikebot'):
hook.assert_failure('BAD_RANK is not a valid rank')
with Rank(us=users).executest(hook, args='BAN goodlikebot'), users._manual('goodlikebot'):
hook.assert_success('goodlikebot is now BAN')
assert_that(users.rank('goodlikebot')).is_equal_to(UserRank.BAN)
users.set_use_elastic(False)
with Rank(us=users).executest(hook, args='ADMIN goodlikebot'):
hook.assert_failure('Rank could not be set')
def test_rank_shorthand(commander, hook):
with commander.executest(hook, '!ban goodlikebot'), commander._users._manual('goodlikebot'):
hook.assert_success('goodlikebot is now BAN')
assert_that(commander._users.rank('goodlikebot')).is_equal_to(UserRank.BAN)
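The tests above repeat one driving pattern; a small wrapper that captures it (purely a sketch, reusing only names that appear in the tests):
def run_admin_command(commander, hook, line, expect=None):
    # Execute a chat command as the implicit admin user and assert the outcome.
    with commander.executest(hook, line):
        if expect is None:
            hook.assert_success()
        else:
            hook.assert_success(expect)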
| 34.461538
| 96
| 0.710714
|
from assertpy import assert_that
from httmock import HTTMock
from sahyun_bot.commands.admin import Index, Rank
from sahyun_bot.users_settings import UserRank
from tests.mock_customsforge import customsforge
def test_require_admin(commander, hook):
for command in ['!lock', '!index', '!rank']:
with commander.executest(hook, command, 'goodlikebot'):
hook.assert_silent_failure()
def test_lock_unlock(commander, hook):
with commander.executest(hook, '!lock'):
hook.assert_success('Bot is now in ADMIN only mode')
with commander.executest(hook, '!time', 'goodlikebot'):
hook.assert_silent_failure()
with commander.executest(hook, '!lock'):
hook.assert_success('Bot no longer in ADMIN only mode')
with commander.executest(hook, '!time', 'goodlikebot'):
hook.assert_success()
def test_index(tl, hook):
with HTTMock(customsforge), Index(tl=tl).executest(hook):
hook.assert_success('CDLCs indexed')
tl.set_use_elastic(False)
with HTTMock(customsforge), Index(tl=tl).executest(hook):
hook.assert_failure('CDLCs could not be indexed')
def test_rank(users, hook):
with Rank(us=users).executest(hook, args=''):
hook.assert_failure('Try !rank RANK NICK')
with Rank(us=users).executest(hook, args='just_rank'):
hook.assert_failure('Try !rank RANK NICK')
with Rank(us=users).executest(hook, args='BAD_RANK goodlikebot'):
hook.assert_failure('BAD_RANK is not a valid rank')
with Rank(us=users).executest(hook, args='BAN goodlikebot'), users._manual('goodlikebot'):
hook.assert_success('goodlikebot is now BAN')
assert_that(users.rank('goodlikebot')).is_equal_to(UserRank.BAN)
users.set_use_elastic(False)
with Rank(us=users).executest(hook, args='ADMIN goodlikebot'):
hook.assert_failure('Rank could not be set')
def test_rank_shorthand(commander, hook):
with commander.executest(hook, '!ban goodlikebot'), commander._users._manual('goodlikebot'):
hook.assert_success('goodlikebot is now BAN')
assert_that(commander._users.rank('goodlikebot')).is_equal_to(UserRank.BAN)
| true
| true
|
f701836e504ea4264dcd26c30d57e1b8dca9025b
| 3,075
|
py
|
Python
|
gogamechen3/api/rpc/taskflow/__init__.py
|
lolizeppelin/gogamechen3
|
4ff06f9042f1bb0cc22e1cc0b342967a829ae0f8
|
[
"MIT"
] | null | null | null |
gogamechen3/api/rpc/taskflow/__init__.py
|
lolizeppelin/gogamechen3
|
4ff06f9042f1bb0cc22e1cc0b342967a829ae0f8
|
[
"MIT"
] | null | null | null |
gogamechen3/api/rpc/taskflow/__init__.py
|
lolizeppelin/gogamechen3
|
4ff06f9042f1bb0cc22e1cc0b342967a829ae0f8
|
[
"MIT"
] | null | null | null |
import os
import base64
from simpleutil.utils import digestutils
from goperation.filemanager import LocalFile
from goperation.manager.rpc.agent.application.taskflow.middleware import EntityMiddleware
from goperation.manager.rpc.agent.application.taskflow.database import Database
from goperation.manager.rpc.agent.application.taskflow.application import AppUpgradeFile
from goperation.manager.rpc.agent.application.taskflow.application import AppLocalBackupFile
from gogamechen3.api import gfile
class GogameMiddle(EntityMiddleware):
def __init__(self, entity, endpoint, objtype):
super(GogameMiddle, self).__init__(entity, endpoint)
self.objtype = objtype
self.databases = {}
self.waiter = None
class GogameDatabase(Database):
def __init__(self, **kwargs):
super(GogameDatabase, self).__init__(**kwargs)
self.database_id = kwargs.get('database_id')
self.source = kwargs.get('source')
self.rosource = kwargs.get('rosource')
self.subtype = kwargs.get('subtype')
self.ro_user = kwargs.get('ro_user')
self.ro_passwd = kwargs.get('ro_passwd')
class GogameAppFile(AppUpgradeFile):
def __init__(self, source, objtype, revertable=False, rollback=False,
stream=None):
super(GogameAppFile, self).__init__(source, revertable, rollback)
self.objtype = objtype
self.stream = stream
def post_check(self):
gfile.check(self.objtype, self.file)
def clean(self):
if self.stream:
os.remove(self.file)
def prepare(self, middleware=None, timeout=None):
if self.stream:
if len(self.stream) > 5000:
raise ValueError("Strem over size")
file_path = os.path.join('/tmp', '%s.zip' % self.source)
data = base64.b64decode(self.stream)
if digestutils.strmd5(data) != self.source:
raise ValueError('Md5 not match')
            with open(file_path, 'wb') as f:
                f.write(data)  # 'data' was decoded above; no need to decode twice
self.localfile = LocalFile(file_path, self.source, len(data))
else:
self.localfile = middleware.filemanager.get(self.source, download=True, timeout=timeout)
try:
self.post_check()
except Exception:
localfile = self.localfile
self.localfile = None
if self.stream:
os.remove(localfile.path)
else:
middleware.filemanager.delete(self.source)
raise
class GogameAppBackupFile(AppLocalBackupFile):
def __init__(self, destination, objtype):
super(GogameAppBackupFile, self).__init__(destination,
exclude=gfile.CompressConfAndLogExcluder(),
topdir=False,
native=True)
self.objtype = objtype
def post_check(self):
gfile.check(self.objtype, self.file)
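A sketch of the contract GogameAppFile.prepare() enforces for streamed uploads: 'source' must equal the md5 of the base64-decoded payload, and the stream must stay under the 5000-byte cap. The payload and objtype values are placeholders.
import base64
from simpleutil.utils import digestutils

payload = b"zip archive bytes"        # placeholder content
stream = base64.b64encode(payload)    # what the caller uploads (<= 5000 bytes)
source = digestutils.strmd5(payload)  # must match, or prepare() raises ValueError
app_file = GogameAppFile(source, 'someobjtype', stream=stream)  # objtype is illustrative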
| 35.344828
| 100
| 0.626992
|
import os
import base64
from simpleutil.utils import digestutils
from goperation.filemanager import LocalFile
from goperation.manager.rpc.agent.application.taskflow.middleware import EntityMiddleware
from goperation.manager.rpc.agent.application.taskflow.database import Database
from goperation.manager.rpc.agent.application.taskflow.application import AppUpgradeFile
from goperation.manager.rpc.agent.application.taskflow.application import AppLocalBackupFile
from gogamechen3.api import gfile
class GogameMiddle(EntityMiddleware):
def __init__(self, entity, endpoint, objtype):
super(GogameMiddle, self).__init__(entity, endpoint)
self.objtype = objtype
self.databases = {}
self.waiter = None
class GogameDatabase(Database):
def __init__(self, **kwargs):
super(GogameDatabase, self).__init__(**kwargs)
self.database_id = kwargs.get('database_id')
self.source = kwargs.get('source')
self.rosource = kwargs.get('rosource')
self.subtype = kwargs.get('subtype')
self.ro_user = kwargs.get('ro_user')
self.ro_passwd = kwargs.get('ro_passwd')
class GogameAppFile(AppUpgradeFile):
def __init__(self, source, objtype, revertable=False, rollback=False,
stream=None):
super(GogameAppFile, self).__init__(source, revertable, rollback)
self.objtype = objtype
self.stream = stream
def post_check(self):
gfile.check(self.objtype, self.file)
def clean(self):
if self.stream:
os.remove(self.file)
def prepare(self, middleware=None, timeout=None):
if self.stream:
if len(self.stream) > 5000:
raise ValueError("Strem over size")
file_path = os.path.join('/tmp', '%s.zip' % self.source)
data = base64.b64decode(self.stream)
if digestutils.strmd5(data) != self.source:
raise ValueError('Md5 not match')
            with open(file_path, 'wb') as f:
                f.write(data)
self.localfile = LocalFile(file_path, self.source, len(data))
else:
self.localfile = middleware.filemanager.get(self.source, download=True, timeout=timeout)
try:
self.post_check()
except Exception:
localfile = self.localfile
self.localfile = None
if self.stream:
os.remove(localfile.path)
else:
middleware.filemanager.delete(self.source)
raise
class GogameAppBackupFile(AppLocalBackupFile):
def __init__(self, destination, objtype):
super(GogameAppBackupFile, self).__init__(destination,
exclude=gfile.CompressConfAndLogExcluder(),
topdir=False,
native=True)
self.objtype = objtype
def post_check(self):
gfile.check(self.objtype, self.file)
| true
| true
|
f7018457af97cde20bee525c029ee91da68c58d8
| 2,647
|
py
|
Python
|
Libraries/XML.py
|
CoolCat467/MineOS-Python3-Port
|
39a6b4c1fcca7165501c8c2c77c5e10b208830d8
|
[
"MIT"
] | null | null | null |
Libraries/XML.py
|
CoolCat467/MineOS-Python3-Port
|
39a6b4c1fcca7165501c8c2c77c5e10b208830d8
|
[
"MIT"
] | null | null | null |
Libraries/XML.py
|
CoolCat467/MineOS-Python3-Port
|
39a6b4c1fcca7165501c8c2c77c5e10b208830d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# XML API, for dealing with XML strings
# -*- coding: utf-8 -*-
__all__ = ['parseargs', 'collect']
'<users>\n\t<user>\n\t\t<id>1</id>\n\t\t<name>Fred</name>\n\t\t<salary>500000</salary>\n\t</user>\n\t<user>\n\t\t<id>1</id>\n\t\t<name>ScienceCat</name>\n\t\t<salary>500000</salary>\n\t</user>\n\t<user>\n\t\t<id>1</id>\n\t\t<name>Bob</name>\n\t\t<salary>500000</salary>\n\t</user>\n</users>'
xmlex = '<users>\n<user>\n<id>1</id>\n<name>Fred</name>\n<salary>500000</salary>\n</user>\n<user>\n<id>1</id>\n<name>ScienceCat</name>\n<salary>500000</salary>\n</user>\n<user>\n<id>1</id>\n<name>Bob</name>\n<salary>500000</salary>\n</user>\n</users>'
argex = 'cats="True and Sand" true=\'Cats two\' sand="graval"'
import re
##import xml.etree.cElementTree as xml
def parseargs(string:str):
"""Split a given string into individual arguments, seperated into key:arg for <key>=(' or ")<arg>(same char as start)"""
arg = {}
# ([%-%w]+)=([\"'])(.-)%2
# '([\w]+)=([\"\'])(.*)'
# '([-\w]+)=([\"\']*)'
## pattern = re.compile('([\w]+)=([\"\'])(.*)')
## print(pattern)
## for match in re.findall(pattern, string):
## print(match)
parts = string.split(' ')
bkey = ''
buffer = ''
end = '"'
for part in parts:
if '=' in part:
key, vp = part.split('=')
if vp[0] in ('"', "'"):
end = vp[0]
if vp.endswith(end):
arg[key] = vp[1:-1]
else:
bkey = key
buffer += vp
elif part.endswith(end):
buffer += ' '+part
arg[bkey] = buffer[1:-1]
bkey, buffer = '', ''
else:
buffer += ' '+part
return arg
def collect(string:str):
stack = []
top = []
stack.append(top)
i, j = 0, 0
class elementTag:
def __init__(self, label, xargs, empty=0):
self.label = label
self.xargs = xargs
self.empty = empty
    tag = re.compile(r'<(/?)([-\w:]+)(.*?)(/?)>', re.DOTALL)
    while True:
        match = tag.search(string, i)
        if not match:
            break
        c, label, xarg, empty = match.groups()
        ni, j = match.span()
        text = string[i:ni]
        if text.strip():
            top.append(text)
        if empty == '/':# empty element tag
            top.append(elementTag(label, parseargs(xarg), 1))
        elif c == '': # start tag
            top = [elementTag(label, parseargs(xarg))]
            stack.append(top)
        else: # end tag
            if len(stack) < 2:
                raise ValueError(f'Nothing to close with {label}.')
            toclose = stack.pop()
            top = stack[-1]
            if toclose[0].label != label:
                raise ValueError(f'Trying to close {toclose[0].label} with {label}.')
            top.append(toclose)
        i = j
    # Any trailing text after the last tag.
    text = string[i:]
    if text.strip():
        stack[-1].append(text)
    if len(stack) > 1:
        raise ValueError(f'Unclosed {stack[-1][0].label}.')
    return stack[0]
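A quick self-check, added for illustration, using the sample strings defined at the top of the module:
if __name__ == '__main__':
    print(parseargs(argex))
    # -> {'cats': 'True and Sand', 'true': 'Cats two', 'sand': 'graval'}
    tree = collect(xmlex)
    print(tree[0][0].label)  # -> 'users'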
| 28.771739
| 291
| 0.491878
|
__all__ = ['parseargs', 'collect']
xmlex = '<users>\n<user>\n<id>1</id>\n<name>Fred</name>\n<salary>500000</salary>\n</user>\n<user>\n<id>1</id>\n<name>ScienceCat</name>\n<salary>500000</salary>\n</user>\n<user>\n<id>1</id>\n<name>Bob</name>\n<salary>500000</salary>\n</user>\n</users>'
argex = 'cats="True and Sand" true=\'Cats two\' sand="graval"'
def parseargs(string:str):
arg = {}
# '([\w]+)=([\"\'])(.*)'
## pattern = re.compile('([\w]+)=([\"\'])(.*)')
parts = string.split(' ')
bkey = ''
buffer = ''
end = '"'
for part in parts:
if '=' in part:
key, vp = part.split('=')
if vp[0] in ('"', "'"):
end = vp[0]
if vp.endswith(end):
arg[key] = vp[1:-1]
else:
bkey = key
buffer += vp
elif part.endswith(end):
buffer += ' '+part
arg[bkey] = buffer[1:-1]
bkey, buffer = '', ''
else:
buffer += ' '+part
return arg
def collect(string:str):
stack = []
top = []
stack.append(top)
i, j = 0, 0
class elementTag:
def __init__(self, label, xargs, empty=0):
self.label = label
self.xargs = xargs
self.empty = empty
    import re
    tag = re.compile(r'<(/?)([-\w:]+)(.*?)(/?)>', re.DOTALL)
    while True:
        match = tag.search(string, i)
        if not match:
            break
        c, label, xarg, empty = match.groups()
        ni, j = match.span()
        text = string[i:ni]
        if text.strip():
            top.append(text)
        if empty == '/':# empty element tag
            top.append(elementTag(label, parseargs(xarg), 1))
        elif c == '': # start tag
            top = [elementTag(label, parseargs(xarg))]
            stack.append(top)
        else:
            if len(stack) < 2:
                raise ValueError(f'Nothing to close with {label}.')
            toclose = stack.pop()
            top = stack[-1]
            if toclose[0].label != label:
                raise ValueError(f'Trying to close {toclose[0].label} with {label}.')
            top.append(toclose)
        i = j
    text = string[i:]
    if text.strip():
        stack[-1].append(text)
    if len(stack) > 1:
        raise ValueError(f'Unclosed {stack[-1][0].label}.')
    return stack[0]
| true
| true
|
f70184ed31796ae9bea068a1768eac058c99e2d0
| 3,165
|
py
|
Python
|
scripts/local_test_http_function.py
|
sanserg/fun-anomaly
|
8b07f3f393cd60dbcff3f3fa0bebe11ced5f6a5d
|
[
"Apache-2.0"
] | null | null | null |
scripts/local_test_http_function.py
|
sanserg/fun-anomaly
|
8b07f3f393cd60dbcff3f3fa0bebe11ced5f6a5d
|
[
"Apache-2.0"
] | null | null | null |
scripts/local_test_http_function.py
|
sanserg/fun-anomaly
|
8b07f3f393cd60dbcff3f3fa0bebe11ced5f6a5d
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import bif
from ai.functions import SimpleAnomaly
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions.enginelog import EngineLogging
from custom import settings
EngineLogging.configure_console_logging(logging.DEBUG)
'''
# Replace with a credentials dictionary or provide a credentials file.
# Explore > Usage > Watson IOT Platform Analytics > Copy to clipboard
# Paste contents in a json file.
'''
#with open('credentials_Monitor-Demo.json', encoding='utf-8') as F:
#with open('credentials.json', encoding='utf-8') as F:
with open('credentials_dev2.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
'''
Developing Test Pipelines
-------------------------
When creating a set of functions you can test how these functions will
work together by creating a test pipeline.
'''
'''
Create a database object to access Watson IOT Platform Analytics DB.
'''
db = Database(credentials = credentials)
db_schema = None # set if you are not using the default
'''
To do anything with IoT Platform Analytics, you will need one or more entity types.
You can create entity types through the IoT Platform or using the python API as shown below.
The database schema is only needed if you are not using the default schema. You can also rename the timestamp.
'''
entity_name = 'Turbines'
# dash100462 Used in dev2
db_schema = 'dash100462'
# db_schema = None # replace if you are not using the default schema
db.drop_table(entity_name, schema = db_schema)
entity = EntityType(entity_name,db,
Column('TURBINE_ID',String(50)),
Column('TEMPERATURE',Float()),
Column('PRESSURE',Float()),
Column('VOLUME', Float()),
SimpleAnomaly(request='GET',
url='internal_test',
output_item = 'http_preload_done'),
bif.PythonExpression(expression='df["TEMPERATURE"]*df["PRESSURE"]',
output_name = 'VOLUME'),
**{
'_timestamp' : 'evt_timestamp',
'_db_schema' : db_schema
})
'''
When creating an EntityType object you will need to specify the name of the entity, the database
object that will contain entity data
After creating an EntityType you will need to register it so that it visible in the UI.
To also register the functions and constants associated with the entity type, specify
'publish_kpis' = True.
'''
entity.register(raise_error=False)
db.register_functions([SimpleAnomaly])
'''
To test the execution of kpi calculations defined for the entity type locally
use 'exec_local_pipeline'.
A local test will not update the server job log or write kpi data to the AS data
lake. Instead kpi data is written to the local filesystem in csv form.
'''
entity.exec_local_pipeline()
'''
view entity data
'''
df = db.read_table(table_name=entity_name, schema=db_schema)
print(df.head())
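For reference, the PythonExpression registered above is evaluated row-wise against the entity dataframe; the same arithmetic stands alone as:
import pandas as pd

sample = pd.DataFrame({"TEMPERATURE": [10.0, 12.5], "PRESSURE": [2.0, 3.0]})
sample["VOLUME"] = sample["TEMPERATURE"] * sample["PRESSURE"]  # same expression as the KPI
print(sample)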
| 36.37931
| 110
| 0.68752
|
import json
import logging
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import bif
from ai.functions import SimpleAnomaly
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions.enginelog import EngineLogging
from custom import settings
EngineLogging.configure_console_logging(logging.DEBUG)
with open('credentials_dev2.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
db = Database(credentials = credentials)
db_schema = None
entity_name = 'Turbines'
db_schema = 'dash100462'
db.drop_table(entity_name, schema = db_schema)
entity = EntityType(entity_name,db,
Column('TURBINE_ID',String(50)),
Column('TEMPERATURE',Float()),
Column('PRESSURE',Float()),
Column('VOLUME', Float()),
SimpleAnomaly(request='GET',
url='internal_test',
output_item = 'http_preload_done'),
bif.PythonExpression(expression='df["TEMPERATURE"]*df["PRESSURE"]',
output_name = 'VOLUME'),
**{
'_timestamp' : 'evt_timestamp',
'_db_schema' : db_schema
})
entity.register(raise_error=False)
db.register_functions([SimpleAnomaly])
entity.exec_local_pipeline()
df = db.read_table(table_name=entity_name, schema=db_schema)
print(df.head())
| true
| true
|
f70184f57fcc71d87ff6ce78f33efc207b7ef96b
| 4,810
|
py
|
Python
|
modoboa_amavis/factories.py
|
modoboa/modoboa-amavis
|
18e5a210ac2eb007ce28d70675f4188d93e1b822
|
[
"MIT"
] | 22
|
2015-05-01T09:09:11.000Z
|
2021-03-20T03:11:49.000Z
|
modoboa_amavis/factories.py
|
modoboa/modoboa-amavis
|
18e5a210ac2eb007ce28d70675f4188d93e1b822
|
[
"MIT"
] | 138
|
2015-04-30T16:59:47.000Z
|
2022-03-13T13:46:28.000Z
|
modoboa_amavis/factories.py
|
modoboa/modoboa-amavis
|
18e5a210ac2eb007ce28d70675f4188d93e1b822
|
[
"MIT"
] | 18
|
2015-05-05T10:27:23.000Z
|
2021-09-19T23:58:59.000Z
|
# -*- coding: utf-8 -*-
"""Amavis factories."""
from __future__ import unicode_literals
import datetime
import time
import factory
from . import models
from .utils import smart_bytes
SPAM_BODY = """X-Envelope-To: <{rcpt}>
X-Envelope-To-Blocked: <{rcpt}>
X-Quarantine-ID: <nq6ekd4wtXZg>
X-Spam-Flag: YES
X-Spam-Score: 1000.985
X-Spam-Level: ****************************************************************
X-Spam-Status: Yes, score=1000.985 tag=2 tag2=6.31 kill=6.31
tests=[ALL_TRUSTED=-1, GTUBE=1000, PYZOR_CHECK=1.985]
autolearn=no autolearn_force=no
Received: from demo.modoboa.org ([127.0.0.1])
by localhost (demo.modoboa.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id nq6ekd4wtXZg for <[email protected]>;
Thu, 9 Nov 2017 15:59:52 +0100 (CET)
Received: from demo.modoboa.org (localhost [127.0.0.1])
by demo.modoboa.org (Postfix) with ESMTP
for <[email protected]>; Thu, 9 Nov 2017 15:59:52 +0100 (CET)
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: base64
Subject: Sample message
From: {sender}
To: {rcpt}
Message-ID: <[email protected]>
Date: Thu, 09 Nov 2017 15:59:52 +0100
This is the GTUBE, the
Generic
Test for
Unsolicited
Bulk
Email
If your spam filter supports it, the GTUBE provides a test by which you
can verify that the filter is installed correctly and is detecting incoming
spam. You can send yourself a test mail containing the following string of
characters (in upper case and with no white spaces and line breaks):
XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X
You should send this test mail from an account outside of your network.
"""
VIRUS_BODY = """Subject: Virus Test Message (EICAR)
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="huq684BweRXVnRxX"
Content-Disposition: inline
Date: Sun, 06 Nov 2011 10:08:18 -0800
--huq684BweRXVnRxX
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
This is a virus test message. It contains an attached file 'eicar.com',
which contains the EICAR virus <http://eicar.org/86-0-Intended-use.html>
test pattern.
--huq684BweRXVnRxX
Content-Type: application/x-msdos-program
Content-Disposition: attachment; filename="eicar.com"
Content-Transfer-Encoding: quoted-printable
X5O!P%@AP[4\PZX54(P^)7CC)7}$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$H+H*=0A
--huq684BweRXVnRxX--
"""
class MaddrFactory(factory.django.DjangoModelFactory):
"""Factory for Maddr."""
class Meta:
model = models.Maddr
django_get_or_create = ("email", )
id = factory.Sequence(lambda n: n) # NOQA:A003
email = factory.Sequence(lambda n: "user_{}@domain.test".format(n))
domain = "test.domain"
class MsgsFactory(factory.django.DjangoModelFactory):
"""Factory for Mailaddr."""
class Meta:
model = models.Msgs
mail_id = factory.Sequence(lambda n: "mailid{}".format(n))
secret_id = factory.Sequence(lambda n: smart_bytes("id{}".format(n)))
sid = factory.SubFactory(MaddrFactory)
client_addr = "127.0.0.1"
originating = "Y"
dsn_sent = "N"
subject = factory.Sequence(lambda n: "Test message {}".format(n))
time_num = factory.LazyAttribute(lambda o: int(time.time()))
time_iso = factory.LazyAttribute(
lambda o: datetime.datetime.fromtimestamp(o.time_num).isoformat())
size = 100
class MsgrcptFactory(factory.django.DjangoModelFactory):
"""Factory for Msgrcpt."""
class Meta:
model = models.Msgrcpt
rseqnum = 1
is_local = "Y"
bl = "N"
wl = "N"
mail = factory.SubFactory(MsgsFactory)
rid = factory.SubFactory(MaddrFactory)
class QuarantineFactory(factory.django.DjangoModelFactory):
"""Factory for Quarantine."""
class Meta:
model = models.Quarantine
chunk_ind = 1
mail = factory.SubFactory(MsgsFactory)
def create_quarantined_msg(rcpt, sender, rs, body, **kwargs):
"""Create a quarantined msg."""
msgrcpt = MsgrcptFactory(
rs=rs,
rid__email=rcpt,
rid__domain="com.test", # FIXME
mail__sid__email=smart_bytes(sender),
mail__sid__domain="", # FIXME
**kwargs
)
QuarantineFactory(
mail=msgrcpt.mail,
        mail_text=smart_bytes(body)
)
return msgrcpt
def create_spam(rcpt, sender="[email protected]", rs=" "):
"""Create a spam."""
body = SPAM_BODY.format(rcpt=rcpt, sender=sender)
body += "fóó bár"
return create_quarantined_msg(
rcpt, sender, rs, body, bspam_level=999.0, content="S")
def create_virus(rcpt, sender="[email protected]", rs=" "):
"""Create a virus."""
return create_quarantined_msg(rcpt, sender, rs, VIRUS_BODY, content="V")
| 28.802395
| 78
| 0.685863
|
from __future__ import unicode_literals
import datetime
import time
import factory
from . import models
from .utils import smart_bytes
SPAM_BODY = """X-Envelope-To: <{rcpt}>
X-Envelope-To-Blocked: <{rcpt}>
X-Quarantine-ID: <nq6ekd4wtXZg>
X-Spam-Flag: YES
X-Spam-Score: 1000.985
X-Spam-Level: ****************************************************************
X-Spam-Status: Yes, score=1000.985 tag=2 tag2=6.31 kill=6.31
tests=[ALL_TRUSTED=-1, GTUBE=1000, PYZOR_CHECK=1.985]
autolearn=no autolearn_force=no
Received: from demo.modoboa.org ([127.0.0.1])
by localhost (demo.modoboa.org [127.0.0.1]) (amavisd-new, port 10024)
with ESMTP id nq6ekd4wtXZg for <[email protected]>;
Thu, 9 Nov 2017 15:59:52 +0100 (CET)
Received: from demo.modoboa.org (localhost [127.0.0.1])
by demo.modoboa.org (Postfix) with ESMTP
for <[email protected]>; Thu, 9 Nov 2017 15:59:52 +0100 (CET)
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: base64
Subject: Sample message
From: {sender}
To: {rcpt}
Message-ID: <[email protected]>
Date: Thu, 09 Nov 2017 15:59:52 +0100
This is the GTUBE, the
Generic
Test for
Unsolicited
Bulk
Email
If your spam filter supports it, the GTUBE provides a test by which you
can verify that the filter is installed correctly and is detecting incoming
spam. You can send yourself a test mail containing the following string of
characters (in upper case and with no white spaces and line breaks):
XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X
You should send this test mail from an account outside of your network.
"""
VIRUS_BODY = """Subject: Virus Test Message (EICAR)
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="huq684BweRXVnRxX"
Content-Disposition: inline
Date: Sun, 06 Nov 2011 10:08:18 -0800
--huq684BweRXVnRxX
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
This is a virus test message. It contains an attached file 'eicar.com',
which contains the EICAR virus <http://eicar.org/86-0-Intended-use.html>
test pattern.
--huq684BweRXVnRxX
Content-Type: application/x-msdos-program
Content-Disposition: attachment; filename="eicar.com"
Content-Transfer-Encoding: quoted-printable
X5O!P%@AP[4\PZX54(P^)7CC)7}$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$H+H*=0A
--huq684BweRXVnRxX--
"""
class MaddrFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Maddr
django_get_or_create = ("email", )
    id = factory.Sequence(lambda n: n)
    email = factory.Sequence(lambda n: "user_{}@domain.test".format(n))
domain = "test.domain"
class MsgsFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Msgs
mail_id = factory.Sequence(lambda n: "mailid{}".format(n))
secret_id = factory.Sequence(lambda n: smart_bytes("id{}".format(n)))
sid = factory.SubFactory(MaddrFactory)
client_addr = "127.0.0.1"
originating = "Y"
dsn_sent = "N"
subject = factory.Sequence(lambda n: "Test message {}".format(n))
time_num = factory.LazyAttribute(lambda o: int(time.time()))
time_iso = factory.LazyAttribute(
lambda o: datetime.datetime.fromtimestamp(o.time_num).isoformat())
size = 100
class MsgrcptFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Msgrcpt
rseqnum = 1
is_local = "Y"
bl = "N"
wl = "N"
mail = factory.SubFactory(MsgsFactory)
rid = factory.SubFactory(MaddrFactory)
class QuarantineFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Quarantine
chunk_ind = 1
mail = factory.SubFactory(MsgsFactory)
def create_quarantined_msg(rcpt, sender, rs, body, **kwargs):
msgrcpt = MsgrcptFactory(
rs=rs,
rid__email=rcpt,
rid__domain="com.test", mail__sid__email=smart_bytes(sender),
mail__sid__domain="", **kwargs
)
QuarantineFactory(
mail=msgrcpt.mail,
        mail_text=smart_bytes(body)
)
return msgrcpt
def create_spam(rcpt, sender="[email protected]", rs=" "):
body = SPAM_BODY.format(rcpt=rcpt, sender=sender)
body += "fóó bár"
return create_quarantined_msg(
rcpt, sender, rs, body, bspam_level=999.0, content="S")
def create_virus(rcpt, sender="[email protected]", rs=" "):
return create_quarantined_msg(rcpt, sender, rs, VIRUS_BODY, content="V")
| true
| true
|
f701858d90987f1f596d9d74d126ce475a127ae0
| 2,414
|
py
|
Python
|
scripts/mkgti.py
|
ZaynabGhazi/NICERsoft
|
c1e467b807226f091e82cd0e3ab0ce6b7a476610
|
[
"MIT"
] | null | null | null |
scripts/mkgti.py
|
ZaynabGhazi/NICERsoft
|
c1e467b807226f091e82cd0e3ab0ce6b7a476610
|
[
"MIT"
] | null | null | null |
scripts/mkgti.py
|
ZaynabGhazi/NICERsoft
|
c1e467b807226f091e82cd0e3ab0ce6b7a476610
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function, division
import os, sys
import matplotlib.pyplot as plt
import numpy as np
import argparse
from astropy import log
from os import path
from glob import glob
from subprocess import check_call
import shutil
from astropy.table import Table
from astropy.io import fits
from nicer.values import *
from nicer.plotutils import plot_light_curve
def runcmd(cmd):
# CMD should be a list of strings since it is not processed by a shell
log.info('CMD: '+" ".join(cmd))
os.system(" ".join(cmd))
## Some ftools calls don't work properly with check_call...not sure why!
## so I am using os.system instead of check_call
#check_call(cmd,env=os.environ)
################################################
# Checking the presence of HEASOFT
try:
check_call('nicerversion',env=os.environ)
except:
print("You need to initialize FTOOLS/HEASOFT first (e.g., type 'heainit')!", file=sys.stderr)
exit()
################################################
# Checking the presence of gti header and columns in data/
gticolumns = path.join(datadir,'gti_columns.txt')
gtiheader = path.join(datadir,'gti_header.txt')
if not os.path.isfile(gtiheader) or not os.path.isfile(gticolumns):
log.error('The files gti_header.txt or gti_columns.txt are missing. Check the {} directory'.format(os.path.abspath(datadir)))
exit()
desc = """
Create a simple GTI file from a pair of NICER METs. This is handy as an input file to niextract-events timefile=xxx.gti
"""
parser = argparse.ArgumentParser(description = desc)
parser.add_argument("startmet", help="Starting MET for GTI", type=float)
parser.add_argument("stopmet", help="Ending MET for GTI", type=float)
parser.add_argument("--gtiname", help="Name of output GTI FITS file (default gti.fits)", default="gti.fits")
args = parser.parse_args()
################################################
## STEP 5 - dumping the TSTART and TEND into text file
import tempfile
fp = tempfile.NamedTemporaryFile(mode='w')  # text mode: a str is written below
fp.write('{0} {1}\n'.format(args.startmet,args.stopmet))
fp.flush()
################################################
## STEP 6 - Making the GTI file from the text file
log.info("Making the GTI file gti.fits from the GTI data textfile")
cmd = ['ftcreate', '{}'.format(gticolumns), fp.name, args.gtiname, 'headfile={}'.format(gtiheader), 'extname="GTI"', 'clobber=yes']
runcmd(cmd)
fp.close()
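A possible follow-up matching the description above: feeding the new GTI to niextract-events. The timefile= parameter comes from the help text; the remaining parameter names and file names are assumptions/placeholders.
cmd = ['niextract-events',
       'infile=ni1234567890_0mpu7_cl.evt',   # placeholder input event file
       'outfile=filtered.evt',               # placeholder output
       'timefile={}'.format(args.gtiname)]
runcmd(cmd)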
| 35.5
| 131
| 0.677713
|
from __future__ import print_function, division
import os, sys
import matplotlib.pyplot as plt
import numpy as np
import argparse
from astropy import log
from os import path
from glob import glob
from subprocess import check_call
import shutil
from astropy.table import Table
from astropy.io import fits
from nicer.values import *
from nicer.plotutils import plot_light_curve
def runcmd(cmd):
log.info('CMD: '+" ".join(cmd))
os.system(" ".join(cmd))
## so I am using os.system instead of check_call
#check_call(cmd,env=os.environ)
################################################
# Checking the presence of HEASOFT
try:
check_call('nicerversion',env=os.environ)
except:
print("You need to initialize FTOOLS/HEASOFT first (e.g., type 'heainit')!", file=sys.stderr)
exit()
################################################
# Checking the presence of gti header and columns in data/
gticolumns = path.join(datadir,'gti_columns.txt')
gtiheader = path.join(datadir,'gti_header.txt')
if not os.path.isfile(gtiheader) or not os.path.isfile(gticolumns):
log.error('The files gti_header.txt or gti_columns.txt are missing. Check the {} directory'.format(os.path.abspath(datadir)))
exit()
desc = """
Create a simple GTI file from a pair of NICER METs. This is handy as an input file to niextract-events timefile=xxx.gti
"""
parser = argparse.ArgumentParser(description = desc)
parser.add_argument("startmet", help="Starting MET for GTI", type=float)
parser.add_argument("stopmet", help="Ending MET for GTI", type=float)
parser.add_argument("--gtiname", help="Name of output GTI FITS file (default gti.fits)", default="gti.fits")
args = parser.parse_args()
################################################
## STEP 5 - dumping the TSTART and TEND into text file
import tempfile
fp = tempfile.NamedTemporaryFile(mode='w')
fp.write('{0} {1}\n'.format(args.startmet,args.stopmet))
fp.flush()
################################################
## STEP 6 - Making the GTI file from the text file
log.info("Making the GTI file gti.fits from the GTI data textfile")
cmd = ['ftcreate', '{}'.format(gticolumns), fp.name, args.gtiname, 'headfile={}'.format(gtiheader), 'extname="GTI"', 'clobber=yes']
runcmd(cmd)
fp.close()
| true
| true
|
f70185a7b8ced19854c2172d3abd8153b1d14a41
| 4,268
|
py
|
Python
|
deplatformr/views/filecoin_views.py
|
deplatformer/prototype
|
d755624ef2828a9c4b99cad53cc6013e4572e4d2
|
[
"MIT"
] | null | null | null |
deplatformr/views/filecoin_views.py
|
deplatformer/prototype
|
d755624ef2828a9c4b99cad53cc6013e4572e4d2
|
[
"MIT"
] | 1
|
2021-02-10T02:26:02.000Z
|
2021-02-10T02:26:02.000Z
|
deplatformr/views/filecoin_views.py
|
deplatformer/prototype
|
d755624ef2828a9c4b99cad53cc6013e4572e4d2
|
[
"MIT"
] | 1
|
2021-06-28T14:42:56.000Z
|
2021-06-28T14:42:56.000Z
|
import os
from datetime import datetime
from flask import Flask, render_template, flash, safe_join, send_file
from flask_user import login_required, current_user
from werkzeug.utils import secure_filename
from pygate_grpc.client import PowerGateClient
from deplatformr.models.filecoin_models import Ffs, Files, Logs
from deplatformr import app, db
@app.route('/filecoin-files')
@login_required
def filecoin_files():
files = Files.query.filter_by(user_id=current_user.id).all()
return render_template("filecoin/filecoin-files.html", files=files, breadcrumb="Filecoin / Files")
@app.route("/filecoin-download/<cid>", methods=["GET"])
@login_required
def filecoin_download(cid):
"""
Retrieve a file from Filecoin via IPFS using Powergate and offer the user
the option to save it to their machine.
"""
# Retrieve File and FFS info using the CID
file = Files.query.filter_by(CID=cid, user_id=current_user.id).first()
ffs = Ffs.query.get(file.ffs_id)
try:
# Retrieve data from Filecoin
powergate = PowerGateClient(app.config["POWERGATE_ADDRESS"])
data_ = powergate.ffs.get(file.CID, ffs.token)
# Save the downloaded data as a file
# Use the user data directory configured for the app
user_data = app.config["USER_DATA_DIR"]
if not os.path.exists(user_data):
os.makedirs(user_data)
print(user_data)
# Create a subdirectory per username. Usernames are unique.
user_dir = os.path.join(
user_data, str(current_user.id) + "-" + current_user.username)
if not os.path.exists(user_dir):
os.makedirs(user_dir)
print(user_dir)
# Create a Filecoin downloads subdirectory.
filecoin_dir = os.path.join(user_dir, "filecoin/downloads")
if not os.path.exists(filecoin_dir):
os.makedirs(filecoin_dir)
print(filecoin_dir)
with open(os.path.join(filecoin_dir, file.file_name), "wb") as out_file:
# Iterate over the data byte chunks and save them to an output file
for data in data_:
out_file.write(data)
# Create path to download file
safe_path = safe_join("../" + filecoin_dir, file.file_name)
print(safe_path)
# Offer the file for download to local machine
return send_file(safe_path, as_attachment=True)
# TODO: CLEAR CACHED FILES IN DOWNLOAD DIRECTORY
except Exception as e:
# Output error message if download from Filecoin fails
flash("failed to download '{}' from Filecoin. {}".format(
file.file_name, e), "alert-danger")
# Update log table with error
event = Logs(
timestamp=datetime.now().replace(microsecond=0),
event="Download ERROR: "
+ file.file_name
+ " CID: "
+ file.CID
+ " "
+ str(e),
user_id=current_user.id,
)
db.session.add(event)
db.session.commit()
files = Files.query.filter_by(user_id=current_user.id).all()
return render_template("filecoin/filecoin-files.html", files=files, breadcrumb="Filecoin / Files")
@app.route('/filecoin-wallets')
@login_required
def filecoin_wallets():
"""
Retrieve all wallets from all FFSes and save them in a list for
presentation on the UI template
"""
powergate = PowerGateClient(app.config["POWERGATE_ADDRESS"])
try:
ffs = Ffs.query.filter_by(user_id=current_user.id).one()
except:
flash("No wallets created yet.", "alert-danger")
return render_template("filecoin/filecoin-wallets.html", wallets=None, breadcrumb="Filecoin / Wallets")
wallets = []
addresses = powergate.ffs.addrs_list(ffs.token)
for address in addresses.addrs:
balance = powergate.wallet.balance(address.addr)
wallets.append(
{
"ffs": ffs.ffs_id,
"name": address.name,
"address": address.addr,
"type": address.type,
"balance": str(balance.balance),
}
)
return render_template("filecoin/filecoin-wallets.html", wallets=wallets, breadcrumb="Filecoin / Wallets")
| 33.873016
| 111
| 0.646204
|
import os
from datetime import datetime
from flask import Flask, render_template, flash, safe_join, send_file
from flask_user import login_required, current_user
from werkzeug.utils import secure_filename
from pygate_grpc.client import PowerGateClient
from deplatformr.models.filecoin_models import Ffs, Files, Logs
from deplatformr import app, db
@app.route('/filecoin-files')
@login_required
def filecoin_files():
files = Files.query.filter_by(user_id=current_user.id).all()
return render_template("filecoin/filecoin-files.html", files=files, breadcrumb="Filecoin / Files")
@app.route("/filecoin-download/<cid>", methods=["GET"])
@login_required
def filecoin_download(cid):
file = Files.query.filter_by(CID=cid, user_id=current_user.id).first()
ffs = Ffs.query.get(file.ffs_id)
try:
powergate = PowerGateClient(app.config["POWERGATE_ADDRESS"])
data_ = powergate.ffs.get(file.CID, ffs.token)
user_data = app.config["USER_DATA_DIR"]
if not os.path.exists(user_data):
os.makedirs(user_data)
print(user_data)
user_dir = os.path.join(
user_data, str(current_user.id) + "-" + current_user.username)
if not os.path.exists(user_dir):
os.makedirs(user_dir)
print(user_dir)
filecoin_dir = os.path.join(user_dir, "filecoin/downloads")
if not os.path.exists(filecoin_dir):
os.makedirs(filecoin_dir)
print(filecoin_dir)
with open(os.path.join(filecoin_dir, file.file_name), "wb") as out_file:
for data in data_:
out_file.write(data)
safe_path = safe_join("../" + filecoin_dir, file.file_name)
print(safe_path)
return send_file(safe_path, as_attachment=True)
except Exception as e:
flash("failed to download '{}' from Filecoin. {}".format(
file.file_name, e), "alert-danger")
event = Logs(
timestamp=datetime.now().replace(microsecond=0),
event="Download ERROR: "
+ file.file_name
+ " CID: "
+ file.CID
+ " "
+ str(e),
user_id=current_user.id,
)
db.session.add(event)
db.session.commit()
files = Files.query.filter_by(user_id=current_user.id).all()
return render_template("filecoin/filecoin-files.html", files=files, breadcrumb="Filecoin / Files")
@app.route('/filecoin-wallets')
@login_required
def filecoin_wallets():
powergate = PowerGateClient(app.config["POWERGATE_ADDRESS"])
try:
ffs = Ffs.query.filter_by(user_id=current_user.id).one()
except:
flash("No wallets created yet.", "alert-danger")
return render_template("filecoin/filecoin-wallets.html", wallets=None, breadcrumb="Filecoin / Wallets")
wallets = []
addresses = powergate.ffs.addrs_list(ffs.token)
for address in addresses.addrs:
balance = powergate.wallet.balance(address.addr)
wallets.append(
{
"ffs": ffs.ffs_id,
"name": address.name,
"address": address.addr,
"type": address.type,
"balance": str(balance.balance),
}
)
return render_template("filecoin/filecoin-wallets.html", wallets=wallets, breadcrumb="Filecoin / Wallets")
| true
| true
|
f70185ea6fa1e1036a31fb85275e059609151a59
| 787
|
py
|
Python
|
setup.py
|
charles-marceau/msnexport
|
9a130f83652824c27fde48cc28d7d0dc8da831d7
|
[
"MIT"
] | null | null | null |
setup.py
|
charles-marceau/msnexport
|
9a130f83652824c27fde48cc28d7d0dc8da831d7
|
[
"MIT"
] | null | null | null |
setup.py
|
charles-marceau/msnexport
|
9a130f83652824c27fde48cc28d7d0dc8da831d7
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='msnexport',
version='0.1',
license="MIT",
classifiers=["Programming Language :: Python :: 3.7"],
author='Charles Marceau',
author_email='[email protected]',
description='Export your old xml MSN history to pdf.',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/charles-marceau/msnexport',
packages=find_packages(),
include_package_data=True,
install_requires=[
'beautifulsoup4',
'click',
'lxml',
'reportlab'
],
entry_points='''
[console_scripts]
msnexport=msnexport.cli:export
'''
)
| 26.233333
| 58
| 0.651842
|
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='msnexport',
version='0.1',
license="MIT",
classifiers=["Programming Language :: Python :: 3.7"],
author='Charles Marceau',
author_email='[email protected]',
description='Export your old xml MSN history to pdf.',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/charles-marceau/msnexport',
packages=find_packages(),
include_package_data=True,
install_requires=[
'beautifulsoup4',
'click',
'lxml',
'reportlab'
],
entry_points='''
[console_scripts]
msnexport=msnexport.cli:export
'''
)
| true
| true
|
f7018712ee7626943f710f00b08771e4841e72e4
| 3,983
|
py
|
Python
|
Payload_Type/apollo/mythic/agent_functions/inject.py
|
n0pe-sled/Apollo
|
cfc5804d163e1b47f6614321434a717b2bd2066f
|
[
"BSD-3-Clause"
] | null | null | null |
Payload_Type/apollo/mythic/agent_functions/inject.py
|
n0pe-sled/Apollo
|
cfc5804d163e1b47f6614321434a717b2bd2066f
|
[
"BSD-3-Clause"
] | null | null | null |
Payload_Type/apollo/mythic/agent_functions/inject.py
|
n0pe-sled/Apollo
|
cfc5804d163e1b47f6614321434a717b2bd2066f
|
[
"BSD-3-Clause"
] | null | null | null |
from mythic_payloadtype_container.MythicCommandBase import *
import json
from mythic_payloadtype_container.MythicRPC import *
import base64
import asyncio  # used by create_tasking's build-polling loop
class InjectArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"template": CommandParameter(name="Payload Template", type=ParameterType.Payload, supported_agents=["apollo"], supported_agent_build_parameters={"apollo": {"output_type": "Shellcode"}}),
"pid": CommandParameter(name="PID", type=ParameterType.Number),
}
errorMsg = "Missing required parameter: {}"
async def parse_arguments(self):
if (self.command_line[0] != "{"):
raise Exception("Inject requires JSON parameters and not raw command line.")
self.load_args_from_json_string(self.command_line)
class InjectCommand(CommandBase):
cmd = "inject"
needs_admin = False
help_cmd = "inject (modal popup)"
description = "Inject agent shellcode into a remote process."
version = 2
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_upload_file = False
is_remove_file = False
script_only = True
author = "@djhohnstein"
argument_class = InjectArguments
attackmapping = ["T1055"]
async def shinject_completed(self, task: MythicTask, subtask: dict = None, subtask_group_name: str = None) -> MythicTask:
task.status = MythicStatus.Completed
return task
async def create_tasking(self, task: MythicTask) -> MythicTask:
temp = await MythicRPC().execute("get_payload",
payload_uuid=task.args.get_arg("template"))
gen_resp = await MythicRPC().execute("create_payload_from_uuid",
task_id=task.id,
payload_uuid=task.args.get_arg('template'),
new_description="{}'s injection into PID {}".format(task.operator, str(task.args.get_arg("pid"))))
if gen_resp.status == MythicStatus.Success:
# we know a payload is building, now we want it
while True:
resp = await MythicRPC().execute("get_payload",
payload_uuid=gen_resp.response["uuid"],
get_contents=True)
if resp.status == MythicStatus.Success:
if resp.response["build_phase"] == 'success':
b64contents = resp.response["contents"]
pe = base64.b64decode(b64contents)
if len(pe) > 1 and pe[:2] == b"\x4d\x5a":
raise Exception("Inject requires a payload of Raw output, but got an executable.")
# it's done, so we can register a file for it
task.display_params = "payload '{}' into PID {}".format(temp.response["tag"], task.args.get_arg("pid"))
response = await MythicRPC().execute("create_subtask", parent_task_id=task.id,
command="shinject", params_dict={"PID": task.args.get_arg("pid"), "Shellcode File ID": resp.response["file"]["agent_file_id"]},
subtask_callback_function="shinject_completed")
task.status = MythicStatus.Processed
break
elif resp.response["build_phase"] == 'error':
raise Exception("Failed to build new payload: " + resp.response["error_message"])
else:
await asyncio.sleep(1)
else:
raise Exception("Failed to build payload from template {}".format(task.args.get_arg("template")))
return task
async def process_response(self, response: AgentResponse):
pass
| 49.17284
| 198
| 0.581471
|
from mythic_payloadtype_container.MythicCommandBase import *
import json
import asyncio
from mythic_payloadtype_container.MythicRPC import *
import base64
class InjectArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"template": CommandParameter(name="Payload Template", type=ParameterType.Payload, supported_agents=["apollo"], supported_agent_build_parameters={"apollo": {"output_type": "Shellcode"}}),
"pid": CommandParameter(name="PID", type=ParameterType.Number),
}
errorMsg = "Missing required parameter: {}"
async def parse_arguments(self):
        if not self.command_line or self.command_line[0] != "{":
            raise Exception("Inject requires JSON parameters and not raw command line.")
self.load_args_from_json_string(self.command_line)
class InjectCommand(CommandBase):
cmd = "inject"
needs_admin = False
help_cmd = "inject (modal popup)"
description = "Inject agent shellcode into a remote process."
version = 2
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_upload_file = False
is_remove_file = False
script_only = True
author = "@djhohnstein"
argument_class = InjectArguments
attackmapping = ["T1055"]
async def shinject_completed(self, task: MythicTask, subtask: dict = None, subtask_group_name: str = None) -> MythicTask:
task.status = MythicStatus.Completed
return task
async def create_tasking(self, task: MythicTask) -> MythicTask:
temp = await MythicRPC().execute("get_payload",
payload_uuid=task.args.get_arg("template"))
gen_resp = await MythicRPC().execute("create_payload_from_uuid",
task_id=task.id,
payload_uuid=task.args.get_arg('template'),
new_description="{}'s injection into PID {}".format(task.operator, str(task.args.get_arg("pid"))))
if gen_resp.status == MythicStatus.Success:
# we know a payload is building, now we want it
while True:
resp = await MythicRPC().execute("get_payload",
payload_uuid=gen_resp.response["uuid"],
get_contents=True)
if resp.status == MythicStatus.Success:
if resp.response["build_phase"] == 'success':
b64contents = resp.response["contents"]
pe = base64.b64decode(b64contents)
if len(pe) > 1 and pe[:2] == b"\x4d\x5a":
raise Exception("Inject requires a payload of Raw output, but got an executable.")
# it's done, so we can register a file for it
task.display_params = "payload '{}' into PID {}".format(temp.response["tag"], task.args.get_arg("pid"))
response = await MythicRPC().execute("create_subtask", parent_task_id=task.id,
command="shinject", params_dict={"PID": task.args.get_arg("pid"), "Shellcode File ID": resp.response["file"]["agent_file_id"]},
subtask_callback_function="shinject_completed")
task.status = MythicStatus.Processed
break
elif resp.response["build_phase"] == 'error':
raise Exception("Failed to build new payload: " + resp.response["error_message"])
else:
await asyncio.sleep(1)
else:
raise Exception("Failed to build payload from template {}".format(task.args.get_arg("template")))
return task
async def process_response(self, response: AgentResponse):
pass
| true
| true
|
f7018880849f101375cb5c61af38b6fbdabcf866
| 1,045
|
py
|
Python
|
src/data/gbm.py
|
imanolperez/optimal-double-execution
|
b380087765925043b01fe2f1066e5e2d1d850cf9
|
[
"MIT"
] | 4
|
2020-05-20T13:56:36.000Z
|
2021-01-05T12:41:47.000Z
|
src/data/gbm.py
|
imanolperez/optimal-double-execution
|
b380087765925043b01fe2f1066e5e2d1d850cf9
|
[
"MIT"
] | null | null | null |
src/data/gbm.py
|
imanolperez/optimal-double-execution
|
b380087765925043b01fe2f1066e5e2d1d850cf9
|
[
"MIT"
] | 3
|
2020-07-02T17:52:05.000Z
|
2022-03-15T14:07:08.000Z
|
import numpy as np
from .base import Price
class GBM(Price):
"""Brownian motion."""
def __init__(self, T=1., sigma1=0.02, sigma2=0.01, s1=1., s2=1.,
drift1=0., drift2=0., n=100):
self.sigma1 = sigma1
self.sigma2 = sigma2
self.drift1 = drift1
self.drift2 = drift2
self.n = n
self.s1 = s1
self.s2 = s2
self.T = T
def generate(self):
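        # Draw two independent Brownian paths with per-step variance
        # sigma_i**2 * T / n, then map them through the GBM exponential
        # with the Ito drift correction (drift_i - sigma_i**2 / 2).
        # Columns of the returned array: time grid, asset-1 price, asset-2 price.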
dt1 = self.sigma1 ** 2 * self.T / self.n
dt2 = self.sigma2 ** 2 * self.T / self.n
bm1 = np.r_[[0.], np.sqrt(dt1) * np.random.randn(self.n - 1).cumsum()]
bm2 = np.r_[[0.], np.sqrt(dt2) * np.random.randn(self.n - 1).cumsum()]
path = np.c_[np.linspace(0, self.T, self.n), bm1, bm2]
path[:, 1] = np.exp((self.drift1 - self.sigma1 ** 2 / 2.) * path[:, 0] + self.sigma1 * path[:, 1])
path[:, 2] = np.exp((self.drift2 - self.sigma2 ** 2 / 2.) * path[:, 0] + self.sigma2 * path[:, 2])
path[:, 1] *= self.s1
path[:, 2] *= self.s2
return path
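# A minimal usage sketch (the names below are illustrative only, not part of
# the original module):
#   model = GBM(T=1., sigma1=0.02, sigma2=0.01, s1=1., s2=1., n=100)
#   path = model.generate()  # ndarray of shape (n, 3)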
| 30.735294
| 106
| 0.507177
|
import numpy as np
from .base import Price
class GBM(Price):
def __init__(self, T=1., sigma1=0.02, sigma2=0.01, s1=1., s2=1.,
drift1=0., drift2=0., n=100):
self.sigma1 = sigma1
self.sigma2 = sigma2
self.drift1 = drift1
self.drift2 = drift2
self.n = n
self.s1 = s1
self.s2 = s2
self.T = T
def generate(self):
dt1 = self.sigma1 ** 2 * self.T / self.n
dt2 = self.sigma2 ** 2 * self.T / self.n
bm1 = np.r_[[0.], np.sqrt(dt1) * np.random.randn(self.n - 1).cumsum()]
bm2 = np.r_[[0.], np.sqrt(dt2) * np.random.randn(self.n - 1).cumsum()]
path = np.c_[np.linspace(0, self.T, self.n), bm1, bm2]
path[:, 1] = np.exp((self.drift1 - self.sigma1 ** 2 / 2.) * path[:, 0] + self.sigma1 * path[:, 1])
path[:, 2] = np.exp((self.drift2 - self.sigma2 ** 2 / 2.) * path[:, 0] + self.sigma2 * path[:, 2])
path[:, 1] *= self.s1
path[:, 2] *= self.s2
return path
| true
| true
|
f7018a32efde0c031de2107c39d92c396f5cf009
| 5,530
|
py
|
Python
|
example.py
|
ms2300/multiplayer-elo
|
ee5e0899e8ff513af336589876abd4cd89ed922b
|
[
"BSD-3-Clause"
] | 6
|
2016-09-19T04:04:43.000Z
|
2022-02-14T22:22:14.000Z
|
example.py
|
ms2300/multiplayer-elo
|
ee5e0899e8ff513af336589876abd4cd89ed922b
|
[
"BSD-3-Clause"
] | null | null | null |
example.py
|
ms2300/multiplayer-elo
|
ee5e0899e8ff513af336589876abd4cd89ed922b
|
[
"BSD-3-Clause"
] | 1
|
2017-08-19T08:49:06.000Z
|
2017-08-19T08:49:06.000Z
|
# Copyright (c) 2016 by Matt Sewall.
# All rights reserved.
import csv
import json
import os
import shutil
from sys import argv
from datetime import datetime
from django.utils.encoding import smart_str
from operator import itemgetter
from elo_classes import *
from elo import *
# Elos dictionaries contain athletes keyed to an elo value
# Entries dictionaries contain athletes keyed to history of their results
elos_boys = {}
elos_girls = {}
entries_boys = {}
entries_girls = {}
_DEFELO = 1500.0
def do_elo(data, meetName, meetDate, gender):
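    # data is a list of (name, place, school) rows for a single race; results
    # are routed into the gender-specific elo and history tables above.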
if gender == "female":
elos = elos_girls
entries = entries_girls
elif gender == "male":
elos = elos_boys
entries = entries_boys
# Add players to competition and calculate elos
meet = Meet()
meet.competitors = []
for dat in data:
name = dat[0]
place = int(dat[1])
school = dat[2]
ath = Athlete(name, school)
if ath in elos:
elo = float(elos.get(ath))
meet.addCompetitor(name, place, elo, school)
else:
# defaults to elo of 1500 on athletes first meet
meet.addCompetitor(name, place, _DEFELO, school)
calculateElo(meet.competitors)
# Take results of competition and append data
for runner in meet.competitors:
ather = Athlete(runner.name, runner.school)
elos[ather] = runner.elo
if ather in entries:
res_list = entries.get(ather)
res_list.append([meetName, meetDate, runner.elo])
entries[ather] = res_list
else:
entries[ather] = [[meetName, meetDate, runner.elo]]
def align_data(filename):
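    # Parse line-delimited JSON meet records, skip Collegiate/MS meets,
    # collect (date, meet name, gender, results) rows, and return them
    # sorted by date.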
filex = open(filename)
sort = []
for json_string in filex:
parsed = json.loads(json_string)
results = parsed["results"]
kill = False
locs = parsed["meetLocation"]
a_date = parsed["meetDate"]
exact_date = datetime.strptime(a_date[0], "%A, %B %d, %Y")
for loc in locs:
if loc == u'Collegiate' or loc == u'MS':
kill = True
for result in results:
            if result.keys() == [u'maleResults'] or result.keys() == [u'femaleResults']:
static = result.values()
events = static[0]
for event in events:
data = []
data.append(exact_date)
data.append(parsed['meetName'])
if result.keys() == [u'maleResults']:
data.append("male")
elif result.keys() == [u'femaleResults']:
data.append("female")
places = []
details = event[u'eventDetails']
for detail in details:
killx = False
ath_detail_List = []
ath_detail_List.append(
smart_str(detail[u'resultName']))
if detail[u'resultPlace'] == " " or \
detail[u'resultPlace'] == u' ':
killx = True
else:
ath_detail_List.append(detail[u'resultPlace'])
ath_detail_List.append(
smart_str(detail[u'resultSchool']))
if killx is False:
places.append(ath_detail_List)
data.append(places)
if kill is False:
sort.append(data)
sortx = sorted(sort, key=itemgetter(0))
return sortx
def write_ath(entries):
if entries == entries_boys:
path = "./meets/boys"
elif entries == entries_girls:
path = "./meets/girls"
if not os.path.exists("./meets/"):
os.mkdir("./meets/")
if not os.path.exists(path):
os.mkdir(path + "/")
for ath in entries:
school_path = os.path.join(path, ath.school)
ath_path = os.path.join(school_path, ath.name + ".csv")
filename = "%s.csv" % ath.name
with open((filename), "w") as fp:
a = csv.writer(fp, delimiter=',')
a.writerows(entries[ath])
if os.path.exists(school_path):
shutil.move(filename, ath_path)
else:
os.mkdir(school_path)
shutil.move(filename, ath_path)
def write_elo(elos, gender):
if gender == "male":
name = "athlete_elo_boys.csv"
elif gender == "female":
name = "athlete_elo_girls.csv"
with open((name), "w") as fp:
a = csv.writer(fp, delimiter=',')
a.writerows(elos)
def main():
# isolates every event and pass that into the do_elo function by gender
events = align_data(argv[1])
count = 0
for event in events:
# makes sure there are 4 values of (name, date, gender, and results)
if len(event) == 4:
print count
count += 1
name = smart_str(event[1][0])
date = event[0]
gender = event[2]
do_elo(event[3], name, date, gender)
# sorts the dictionaries by ascending elo
sorted_boys = sorted(elos_boys.items(), key=itemgetter(1))
sorted_girls = sorted(elos_girls.items(), key=itemgetter(1))
write_elo(sorted_boys, "male")
write_elo(sorted_girls, "female")
write_ath(entries_girls)
write_ath(entries_boys)
if __name__ == '__main__':
main()
| 32.529412
| 76
| 0.547016
|
import csv
import json
import os
import shutil
from sys import argv
from datetime import datetime
from django.utils.encoding import smart_str
from operator import itemgetter
from elo_classes import *
from elo import *
elos_boys = {}
elos_girls = {}
entries_boys = {}
entries_girls = {}
_DEFELO = 1500.0
def do_elo(data, meetName, meetDate, gender):
if gender == "female":
elos = elos_girls
entries = entries_girls
elif gender == "male":
elos = elos_boys
entries = entries_boys
meet = Meet()
meet.competitors = []
for dat in data:
name = dat[0]
place = int(dat[1])
school = dat[2]
ath = Athlete(name, school)
if ath in elos:
elo = float(elos.get(ath))
meet.addCompetitor(name, place, elo, school)
else:
meet.addCompetitor(name, place, _DEFELO, school)
calculateElo(meet.competitors)
for runner in meet.competitors:
ather = Athlete(runner.name, runner.school)
elos[ather] = runner.elo
if ather in entries:
res_list = entries.get(ather)
res_list.append([meetName, meetDate, runner.elo])
entries[ather] = res_list
else:
entries[ather] = [[meetName, meetDate, runner.elo]]
def align_data(filename):
filex = open(filename)
sort = []
for json_string in filex:
parsed = json.loads(json_string)
results = parsed["results"]
kill = False
locs = parsed["meetLocation"]
a_date = parsed["meetDate"]
exact_date = datetime.strptime(a_date[0], "%A, %B %d, %Y")
for loc in locs:
if loc == u'Collegiate' or loc == u'MS':
kill = True
for result in results:
            if result.keys() == [u'maleResults'] or result.keys() == [u'femaleResults']:
static = result.values()
events = static[0]
for event in events:
data = []
data.append(exact_date)
data.append(parsed['meetName'])
if result.keys() == [u'maleResults']:
data.append("male")
elif result.keys() == [u'femaleResults']:
data.append("female")
places = []
details = event[u'eventDetails']
for detail in details:
killx = False
ath_detail_List = []
ath_detail_List.append(
smart_str(detail[u'resultName']))
if detail[u'resultPlace'] == " " or \
detail[u'resultPlace'] == u' ':
killx = True
else:
ath_detail_List.append(detail[u'resultPlace'])
ath_detail_List.append(
smart_str(detail[u'resultSchool']))
if killx is False:
places.append(ath_detail_List)
data.append(places)
if kill is False:
sort.append(data)
sortx = sorted(sort, key=itemgetter(0))
return sortx
def write_ath(entries):
if entries == entries_boys:
path = "./meets/boys"
elif entries == entries_girls:
path = "./meets/girls"
if not os.path.exists("./meets/"):
os.mkdir("./meets/")
if not os.path.exists(path):
os.mkdir(path + "/")
for ath in entries:
school_path = os.path.join(path, ath.school)
ath_path = os.path.join(school_path, ath.name + ".csv")
filename = "%s.csv" % ath.name
with open((filename), "w") as fp:
a = csv.writer(fp, delimiter=',')
a.writerows(entries[ath])
if os.path.exists(school_path):
shutil.move(filename, ath_path)
else:
os.mkdir(school_path)
shutil.move(filename, ath_path)
def write_elo(elos, gender):
if gender == "male":
name = "athlete_elo_boys.csv"
elif gender == "female":
name = "athlete_elo_girls.csv"
with open((name), "w") as fp:
a = csv.writer(fp, delimiter=',')
a.writerows(elos)
def main():
events = align_data(argv[1])
count = 0
for event in events:
if len(event) == 4:
print count
count += 1
name = smart_str(event[1][0])
date = event[0]
gender = event[2]
do_elo(event[3], name, date, gender)
sorted_boys = sorted(elos_boys.items(), key=itemgetter(1))
sorted_girls = sorted(elos_girls.items(), key=itemgetter(1))
write_elo(sorted_boys, "male")
write_elo(sorted_girls, "female")
write_ath(entries_girls)
write_ath(entries_boys)
if __name__ == '__main__':
main()
| false
| true
|