Dataset schema:

column         type            values
-------------  --------------  -----------
commit         stringlengths   40 - 40
old_file       stringlengths   5 - 117
new_file       stringlengths   5 - 117
old_contents   stringlengths   0 - 1.93k
new_contents   stringlengths   19 - 3.3k
subject        stringlengths   17 - 320
message        stringlengths   18 - 3.28k
lang           stringclasses   1 value
license        stringclasses   13 values
repos          stringlengths   7 - 42.4k
completion     stringlengths   19 - 3.3k
prompt         stringlengths   21 - 3.65k
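The records below repeat in this column order, one field per line, with empty cells (such as `old_contents` for newly added files) dropped from the listing. A minimal sketch of consuming such records follows; it assumes the dataset is exported as JSON Lines with the column names above as keys — the file name `commits.jsonl` and the storage format are assumptions for illustration, since the listing here is a flattened preview rather than the actual on-disk format.

    import json

    # Hypothetical export path -- the preview does not specify how the
    # dataset is stored, so a JSON Lines export is assumed.
    DUMP_PATH = "commits.jsonl"

    with open(DUMP_PATH, encoding="utf-8") as handle:
        for line in handle:
            record = json.loads(line)
            # Each record pairs a commit message ("subject"/"message")
            # with the file contents before and after the change.
            before = record.get("old_contents", "")  # may be empty for new files
            after = record["new_contents"]
            print(record["commit"][:8], record["new_file"], record["subject"])

In this preview, `completion` mirrors `new_contents`, and `prompt` appears to concatenate `message` with `old_contents`, so consumers training on prompt/completion pairs can read those two fields directly instead of reassembling them.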
ac3c855583a023fc76b8720aa7e38419b28a26d4
falcom/api/hathi.py
falcom/api/hathi.py
# Copyright (c) 2017 The Regents of the University of Michigan. # All Rights Reserved. Licensed according to the terms of the Revised # BSD License. See LICENSE.txt for details. import json def get_counts_from_item_list (items, htid): a = len([x for x in items if x["htid"] == htid]) b = len(items) - a return a, b def get_oclc_counts_from_json (json_data, htid = ""): try: data = json.loads(json_data) return get_counts_from_item_list(data["items"], htid) except: return 0, 0 def get_hathi_data_from_json (): return ()
# Copyright (c) 2017 The Regents of the University of Michigan. # All Rights Reserved. Licensed according to the terms of the Revised # BSD License. See LICENSE.txt for details. import json class HathiItems: def __init__ (self): pass def __len__ (self): return 0 def get_counts_from_item_list (items, htid): a = len([x for x in items if x["htid"] == htid]) b = len(items) - a return a, b def get_oclc_counts_from_json (json_data, htid = ""): try: data = json.loads(json_data) return get_counts_from_item_list(data["items"], htid) except: return 0, 0 def get_hathi_data_from_json (): return HathiItems()
Refactor empty tuple into empty object with len()
Refactor empty tuple into empty object with len()
Python
bsd-3-clause
mlibrary/image-conversion-and-validation,mlibrary/image-conversion-and-validation
# Copyright (c) 2017 The Regents of the University of Michigan. # All Rights Reserved. Licensed according to the terms of the Revised # BSD License. See LICENSE.txt for details. import json class HathiItems: def __init__ (self): pass def __len__ (self): return 0 def get_counts_from_item_list (items, htid): a = len([x for x in items if x["htid"] == htid]) b = len(items) - a return a, b def get_oclc_counts_from_json (json_data, htid = ""): try: data = json.loads(json_data) return get_counts_from_item_list(data["items"], htid) except: return 0, 0 def get_hathi_data_from_json (): return HathiItems()
Refactor empty tuple into empty object with len() # Copyright (c) 2017 The Regents of the University of Michigan. # All Rights Reserved. Licensed according to the terms of the Revised # BSD License. See LICENSE.txt for details. import json def get_counts_from_item_list (items, htid): a = len([x for x in items if x["htid"] == htid]) b = len(items) - a return a, b def get_oclc_counts_from_json (json_data, htid = ""): try: data = json.loads(json_data) return get_counts_from_item_list(data["items"], htid) except: return 0, 0 def get_hathi_data_from_json (): return ()
cdf60bc0b07c282e75fba747c8adedd165aa0abd
index.py
index.py
#!/usr/bin/env python2.7 from werkzeug.wrappers import Request, Response from get_html import get_html, choose_lang @Request.application def run(request): lang = choose_lang(request) if request.url.startswith("https://") or request.args.get("forcenossl") == "true": html = get_html("launch", lang) else: html = get_html("nossl", lang) return Response(html, mimetype="text/html") if __name__ == "__main__": import CGI CGI.app = run CGI.run()
#!/usr/bin/env python2.7 from werkzeug.wrappers import Request, Response from get_html import get_html, choose_lang @Request.application def run(request): lang = request.args.get("lang") if request.args.get("lang") else choose_lang(request) if request.url.startswith("https://") or request.args.get("forcenossl") == "true": html = get_html("launch", lang) else: html = get_html("nossl", lang) return Response(html, mimetype="text/html") if __name__ == "__main__": import CGI CGI.app = run CGI.run()
Make the language changeable via a GET parameter.
Make the language changeable via a GET parameter.
Python
mit
YtvwlD/dyluna,YtvwlD/dyluna,YtvwlD/dyluna
#!/usr/bin/env python2.7 from werkzeug.wrappers import Request, Response from get_html import get_html, choose_lang @Request.application def run(request): lang = request.args.get("lang") if request.args.get("lang") else choose_lang(request) if request.url.startswith("https://") or request.args.get("forcenossl") == "true": html = get_html("launch", lang) else: html = get_html("nossl", lang) return Response(html, mimetype="text/html") if __name__ == "__main__": import CGI CGI.app = run CGI.run()
Make the language changeable via a GET parameter. #!/usr/bin/env python2.7 from werkzeug.wrappers import Request, Response from get_html import get_html, choose_lang @Request.application def run(request): lang = choose_lang(request) if request.url.startswith("https://") or request.args.get("forcenossl") == "true": html = get_html("launch", lang) else: html = get_html("nossl", lang) return Response(html, mimetype="text/html") if __name__ == "__main__": import CGI CGI.app = run CGI.run()
b08e7fd64da5342508807420e5c9aa6c3686a68e
scripts/analytics/institutions.py
scripts/analytics/institutions.py
from modularodm import Q from website.app import init_app from website.models import User, Node, Institution def get_institutions(): institutions = Institution.find(Q('_id', 'ne', None)) return institutions def get_user_count_by_institutions(): institutions = get_institutions() user_counts = [] for institution in institutions: query = Q('_affiliated_institutions', 'eq', institution.node) user_counts.append({institution.name: User.find(query).count()}) return user_counts def get_node_count_by_institutions(): institutions = get_institutions() node_counts = [] for institution in institutions: query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('parent_node', 'eq', None) ) node_counts.append({institution.name: Node.find_by_institutions(institution, query).count()}) return node_counts def main(): users_by_institutions = get_user_count_by_institutions() nodes_by_institutions = get_node_count_by_institutions() print(users_by_institutions) print(nodes_by_institutions) if __name__ == '__main__': init_app() main()
Add basic script without the ability to send the data anywhere
Add basic script without the ability to send the data anywhere
Python
apache-2.0
Johnetordoff/osf.io,hmoco/osf.io,laurenrevere/osf.io,mattclark/osf.io,baylee-d/osf.io,icereval/osf.io,laurenrevere/osf.io,sloria/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,binoculars/osf.io,caseyrollins/osf.io,alexschiller/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,crcresearch/osf.io,felliott/osf.io,caneruguz/osf.io,chrisseto/osf.io,emetsger/osf.io,hmoco/osf.io,monikagrabowska/osf.io,emetsger/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,chrisseto/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,TomBaxter/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,mluo613/osf.io,adlius/osf.io,Johnetordoff/osf.io,hmoco/osf.io,erinspace/osf.io,saradbowman/osf.io,rdhyee/osf.io,acshi/osf.io,cwisecarver/osf.io,sloria/osf.io,Nesiehr/osf.io,felliott/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,rdhyee/osf.io,chennan47/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,acshi/osf.io,chrisseto/osf.io,Nesiehr/osf.io,caneruguz/osf.io,crcresearch/osf.io,monikagrabowska/osf.io,adlius/osf.io,leb2dg/osf.io,chrisseto/osf.io,erinspace/osf.io,adlius/osf.io,mfraezz/osf.io,erinspace/osf.io,aaxelb/osf.io,hmoco/osf.io,emetsger/osf.io,chennan47/osf.io,Nesiehr/osf.io,mluo613/osf.io,pattisdr/osf.io,icereval/osf.io,alexschiller/osf.io,mfraezz/osf.io,chennan47/osf.io,TomBaxter/osf.io,mattclark/osf.io,binoculars/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,saradbowman/osf.io,acshi/osf.io,caneruguz/osf.io,emetsger/osf.io,rdhyee/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,mluo613/osf.io,mluo613/osf.io,monikagrabowska/osf.io,cslzchen/osf.io,adlius/osf.io,acshi/osf.io,TomBaxter/osf.io,aaxelb/osf.io,cwisecarver/osf.io,mfraezz/osf.io,alexschiller/osf.io,baylee-d/osf.io,icereval/osf.io,leb2dg/osf.io,alexschiller/osf.io,alexschiller/osf.io,laurenrevere/osf.io,pattisdr/osf.io,mattclark/osf.io,leb2dg/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,cwisecarver/osf.io,mluo613/osf.io,HalcyonChimera/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,felliott/osf.io,sloria/osf.io,cslzchen/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,monikagrabowska/osf.io
from modularodm import Q from website.app import init_app from website.models import User, Node, Institution def get_institutions(): institutions = Institution.find(Q('_id', 'ne', None)) return institutions def get_user_count_by_institutions(): institutions = get_institutions() user_counts = [] for institution in institutions: query = Q('_affiliated_institutions', 'eq', institution.node) user_counts.append({institution.name: User.find(query).count()}) return user_counts def get_node_count_by_institutions(): institutions = get_institutions() node_counts = [] for institution in institutions: query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('parent_node', 'eq', None) ) node_counts.append({institution.name: Node.find_by_institutions(institution, query).count()}) return node_counts def main(): users_by_institutions = get_user_count_by_institutions() nodes_by_institutions = get_node_count_by_institutions() print(users_by_institutions) print(nodes_by_institutions) if __name__ == '__main__': init_app() main()
Add basic script without the ability to send the data anywhere
f4b50b12ae8ad4da6e04ddc186c077c31af00611
SimpleHTTP404Server.py
SimpleHTTP404Server.py
import os import SimpleHTTPServer class GitHubHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """ Overrides the default request handler to handle GitHub custom 404 pages. (Pretty much a 404.html page in your root.) See https://help.github.com/articles/custom-404-pages This currently only works for erroneous pages in the root directory, but that's enough to test what the 404 page looks like. """ def do_GET(self): path = self.translate_path(self.path) print(self.path) print(path) # If the path doesn't exist, fake it to be the 404 page. if not os.path.exists(path): self.path = '404.html' # Call the superclass methods to actually serve the page. SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) print(self.path) print(self.translate_path(self.path)) SimpleHTTPServer.test(GitHubHandler)
import os import SimpleHTTPServer class GitHubHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """ Overrides the default request handler to handle GitHub custom 404 pages. (Pretty much a 404.html page in your root.) See https://help.github.com/articles/custom-404-pages This currently only works for erroneous pages in the root directory, but that's enough to test what the 404 page looks like. """ def do_GET(self): path = self.translate_path(self.path) # If the path doesn't exist, fake it to be the 404 page. if not os.path.exists(path): self.path = '404.html' # Call the superclass methods to actually serve the page. SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) SimpleHTTPServer.test(GitHubHandler)
Remove some print lines from the fake server.
Remove some print lines from the fake server.
Python
mit
clokep/SimpleHTTP404Server,clokep/SimpleHTTP404Server
import os import SimpleHTTPServer class GitHubHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """ Overrides the default request handler to handle GitHub custom 404 pages. (Pretty much a 404.html page in your root.) See https://help.github.com/articles/custom-404-pages This currently only works for erroneous pages in the root directory, but that's enough to test what the 404 page looks like. """ def do_GET(self): path = self.translate_path(self.path) # If the path doesn't exist, fake it to be the 404 page. if not os.path.exists(path): self.path = '404.html' # Call the superclass methods to actually serve the page. SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) SimpleHTTPServer.test(GitHubHandler)
Remove some print lines from the fake server. import os import SimpleHTTPServer class GitHubHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """ Overrides the default request handler to handle GitHub custom 404 pages. (Pretty much a 404.html page in your root.) See https://help.github.com/articles/custom-404-pages This currently only works for erroneous pages in the root directory, but that's enough to test what the 404 page looks like. """ def do_GET(self): path = self.translate_path(self.path) print(self.path) print(path) # If the path doesn't exist, fake it to be the 404 page. if not os.path.exists(path): self.path = '404.html' # Call the superclass methods to actually serve the page. SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) print(self.path) print(self.translate_path(self.path)) SimpleHTTPServer.test(GitHubHandler)
78e24093f314821d7818f31574dbe521c0ae5fef
sharepa/__init__.py
sharepa/__init__.py
from sharepa.search import ShareSearch, basic_search # noqa from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
from sharepa.search import ShareSearch, basic_search # noqa from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa def source_counts(): return bucket_to_dataframe( 'total_source_counts', basic_search.execute().aggregations.sourceAgg.buckets )
Make it so that source_counts is only executed on purpose
Make it so that source_counts is only executed on purpose
Python
mit
erinspace/sharepa,CenterForOpenScience/sharepa,fabianvf/sharepa,samanehsan/sharepa
from sharepa.search import ShareSearch, basic_search # noqa from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa def source_counts(): return bucket_to_dataframe( 'total_source_counts', basic_search.execute().aggregations.sourceAgg.buckets )
Make it so that source_counts is only executed on purpose from sharepa.search import ShareSearch, basic_search # noqa from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
627217b13482fff5451d3aa03867923925c49ec8
sale_order_add_variants/__openerp__.py
sale_order_add_variants/__openerp__.py
# -*- coding: utf-8 -*- ############################################################################## # # Author: Hugo Santos # Copyright 2015 FactorLibre # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Sale Order Add Variants', 'summary': 'Add variants from template into sale order', 'version': '0.1', 'author': 'Factorlibre,Odoo Community Association (OCA)', 'category': 'Sale', 'license': 'AGPL-3', 'website': 'http://factorlibre.com', 'depends': [ 'sale' ], 'demo': [], 'data': [ 'security/sale_order_add_variants_security.xml', 'view/sale_add_variants_view.xml', 'view/sale_view.xml', 'view/res_config_view.xml' ], 'installable': True }
# -*- coding: utf-8 -*- ############################################################################## # # Author: Hugo Santos # Copyright 2015 FactorLibre # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Sale Order Add Variants', 'summary': 'Add variants from template into sale order', 'version': '0.1', 'author': 'FactorLibre,Odoo Community Association (OCA)', 'category': 'Sale', 'license': 'AGPL-3', 'website': 'http://factorlibre.com', 'depends': [ 'sale' ], 'demo': [], 'data': [ 'security/sale_order_add_variants_security.xml', 'view/sale_add_variants_view.xml', 'view/sale_view.xml', 'view/res_config_view.xml' ], 'installable': True }
Fix typo in author FactorLibre
Fix typo in author FactorLibre
Python
agpl-3.0
kittiu/sale-workflow,Endika/sale-workflow,alexsandrohaag/sale-workflow,xpansa/sale-workflow,diagramsoftware/sale-workflow,BT-ojossen/sale-workflow,brain-tec/sale-workflow,brain-tec/sale-workflow,luistorresm/sale-workflow,numerigraphe/sale-workflow,anybox/sale-workflow,open-synergy/sale-workflow,BT-fgarbely/sale-workflow,anas-taji/sale-workflow,Rona111/sale-workflow,numerigraphe/sale-workflow,akretion/sale-workflow,jabibi/sale-workflow,acsone/sale-workflow,jjscarafia/sale-workflow,kittiu/sale-workflow,Antiun/sale-workflow,akretion/sale-workflow,adhoc-dev/sale-workflow,fevxie/sale-workflow,ddico/sale-workflow,factorlibre/sale-workflow,BT-jmichaud/sale-workflow,BT-cserra/sale-workflow,Eficent/sale-workflow,thomaspaulb/sale-workflow,acsone/sale-workflow,richard-willowit/sale-workflow
# -*- coding: utf-8 -*- ############################################################################## # # Author: Hugo Santos # Copyright 2015 FactorLibre # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Sale Order Add Variants', 'summary': 'Add variants from template into sale order', 'version': '0.1', 'author': 'FactorLibre,Odoo Community Association (OCA)', 'category': 'Sale', 'license': 'AGPL-3', 'website': 'http://factorlibre.com', 'depends': [ 'sale' ], 'demo': [], 'data': [ 'security/sale_order_add_variants_security.xml', 'view/sale_add_variants_view.xml', 'view/sale_view.xml', 'view/res_config_view.xml' ], 'installable': True }
Fix typo in author FactorLibre # -*- coding: utf-8 -*- ############################################################################## # # Author: Hugo Santos # Copyright 2015 FactorLibre # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Sale Order Add Variants', 'summary': 'Add variants from template into sale order', 'version': '0.1', 'author': 'Factorlibre,Odoo Community Association (OCA)', 'category': 'Sale', 'license': 'AGPL-3', 'website': 'http://factorlibre.com', 'depends': [ 'sale' ], 'demo': [], 'data': [ 'security/sale_order_add_variants_security.xml', 'view/sale_add_variants_view.xml', 'view/sale_view.xml', 'view/res_config_view.xml' ], 'installable': True }
82da444753249df9bbd4c516a7b1f9f5a4a7a29a
setup.py
setup.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup setup( name='yg.emanate', use_scm_version=True, description="Lightweight event system for Python", author="YouGov, plc", author_email='[email protected]', url='https://github.com/yougov/yg.emanate', packages=[ 'yg.emanate', ], namespace_packages=['yg'], include_package_data=True, setup_requires=['setuptools_scm>=1.15'], zip_safe=False, keywords='emanate', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', ], test_suite='tests', python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", )
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup setup( name='yg.emanate', use_scm_version=True, description="Lightweight event system for Python", author="YouGov, plc", author_email='[email protected]', url='https://github.com/yougov/yg.emanate', packages=[ 'yg.emanate', ], namespace_packages=['yg'], include_package_data=True, setup_requires=['setuptools_scm>=1.15'], keywords='emanate', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', ], test_suite='tests', python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", )
Remove deprecated 'zip_safe' flag. It's probably safe anyhow.
Remove deprecated 'zip_safe' flag. It's probably safe anyhow.
Python
mit
yougov/emanate
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup setup( name='yg.emanate', use_scm_version=True, description="Lightweight event system for Python", author="YouGov, plc", author_email='[email protected]', url='https://github.com/yougov/yg.emanate', packages=[ 'yg.emanate', ], namespace_packages=['yg'], include_package_data=True, setup_requires=['setuptools_scm>=1.15'], keywords='emanate', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', ], test_suite='tests', python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", )
Remove deprecated 'zip_safe' flag. It's probably safe anyhow. #!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup setup( name='yg.emanate', use_scm_version=True, description="Lightweight event system for Python", author="YouGov, plc", author_email='[email protected]', url='https://github.com/yougov/yg.emanate', packages=[ 'yg.emanate', ], namespace_packages=['yg'], include_package_data=True, setup_requires=['setuptools_scm>=1.15'], zip_safe=False, keywords='emanate', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', ], test_suite='tests', python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", )
c7611391de40d2ac296f3a8dcb1579400eac0bdf
setup.py
setup.py
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) README = open(os.path.join(here, 'README.txt')).read() CHANGES = open(os.path.join(here, 'CHANGES.txt')).read() requires = [ 'pyramid', 'pyramid_zodbconn', 'pyramid_tm', 'pyramid_debugtoolbar', 'ZODB3', 'waitress', 'repoze.folder', 'zope.interface', 'requests==0.14.0', ] setup(name='push-hub', version='0.2', description='push-hub', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pylons", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='', author_email='', url='', keywords='web pylons pyramid', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires = requires, tests_require= requires, test_suite="pushhub", entry_points = """\ [paste.app_factory] main = pushhub:main """, )
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) README = open(os.path.join(here, 'README.txt')).read() CHANGES = open(os.path.join(here, 'CHANGES.txt')).read() requires = [ 'pyramid', 'pyramid_zodbconn', 'pyramid_tm', 'pyramid_debugtoolbar', 'ZODB3', 'waitress', 'repoze.folder', 'zope.interface', 'requests', ] setup(name='push-hub', version='0.2', description='push-hub', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pylons", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='', author_email='', url='', keywords='web pylons pyramid', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires = requires, tests_require= requires, test_suite="pushhub", entry_points = """\ [paste.app_factory] main = pushhub:main """, )
Move requests pin to versions.cfg in buildout
Move requests pin to versions.cfg in buildout
Python
bsd-3-clause
ucla/PushHubCore
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) README = open(os.path.join(here, 'README.txt')).read() CHANGES = open(os.path.join(here, 'CHANGES.txt')).read() requires = [ 'pyramid', 'pyramid_zodbconn', 'pyramid_tm', 'pyramid_debugtoolbar', 'ZODB3', 'waitress', 'repoze.folder', 'zope.interface', 'requests', ] setup(name='push-hub', version='0.2', description='push-hub', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pylons", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='', author_email='', url='', keywords='web pylons pyramid', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires = requires, tests_require= requires, test_suite="pushhub", entry_points = """\ [paste.app_factory] main = pushhub:main """, )
Move requests pin to versions.cfg in buildout import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) README = open(os.path.join(here, 'README.txt')).read() CHANGES = open(os.path.join(here, 'CHANGES.txt')).read() requires = [ 'pyramid', 'pyramid_zodbconn', 'pyramid_tm', 'pyramid_debugtoolbar', 'ZODB3', 'waitress', 'repoze.folder', 'zope.interface', 'requests==0.14.0', ] setup(name='push-hub', version='0.2', description='push-hub', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pylons", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='', author_email='', url='', keywords='web pylons pyramid', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires = requires, tests_require= requires, test_suite="pushhub", entry_points = """\ [paste.app_factory] main = pushhub:main """, )
6e426e4ae0dd3841ea7d92b7434c858cf39e9ef4
setup.py
setup.py
#!/usr/bin/env python import os, sys, glob from setuptools import setup, find_packages setup( name='aegea', version='0.6.0', url='https://github.com/kislyuk/aegea', license=open('LICENSE.md').readline().strip(), author='Andrey Kislyuk', author_email='[email protected]', description='Amazon Web Services Operator Interface', long_description=open('README.rst').read(), install_requires=[ 'setuptools', 'boto3 >= 1.3.0', 'argcomplete >= 1.1.0', 'paramiko >= 1.16.0', 'requests >= 2.9.1', 'tweak >= 0.3.2', 'keymaker >= 0.1.7', 'pyyaml >= 3.11', 'python-dateutil >= 2.1' ], extras_require={ ':python_version == "2.7"': [ 'enum34 >= 1.0.4', 'ipaddress >= 1.0.16', 'backports.statistics >= 0.1.0' ] }, packages=find_packages(exclude=['test']), scripts=glob.glob('scripts/*'), platforms=['MacOS X', 'Posix'], test_suite='test', include_package_data=True )
#!/usr/bin/env python import os, sys, glob, subprocess from setuptools import setup, find_packages try: version = subprocess.check_output(["git", "describe", "--tags", "--match", "v*.*.*"]).strip("v\n") except: version = "0.0.0" setup( name='aegea', version=version, url='https://github.com/kislyuk/aegea', license=open('LICENSE.md').readline().strip(), author='Andrey Kislyuk', author_email='[email protected]', description='Amazon Web Services Operator Interface', long_description=open('README.rst').read(), install_requires=[ 'setuptools', 'boto3 >= 1.3.0', 'argcomplete >= 1.1.0', 'paramiko >= 1.16.0', 'requests >= 2.9.1', 'tweak >= 0.3.2', 'keymaker >= 0.1.7', 'pyyaml >= 3.11', 'python-dateutil >= 2.1' ], extras_require={ ':python_version == "2.7"': [ 'enum34 >= 1.0.4', 'ipaddress >= 1.0.16', 'backports.statistics >= 0.1.0' ] }, packages=find_packages(exclude=['test']), scripts=glob.glob('scripts/*'), platforms=['MacOS X', 'Posix'], test_suite='test', include_package_data=True )
Use git describe output for version
Use git describe output for version
Python
apache-2.0
kislyuk/aegea,wholebiome/aegea,wholebiome/aegea,kislyuk/aegea,wholebiome/aegea,kislyuk/aegea
#!/usr/bin/env python import os, sys, glob, subprocess from setuptools import setup, find_packages try: version = subprocess.check_output(["git", "describe", "--tags", "--match", "v*.*.*"]).strip("v\n") except: version = "0.0.0" setup( name='aegea', version=version, url='https://github.com/kislyuk/aegea', license=open('LICENSE.md').readline().strip(), author='Andrey Kislyuk', author_email='[email protected]', description='Amazon Web Services Operator Interface', long_description=open('README.rst').read(), install_requires=[ 'setuptools', 'boto3 >= 1.3.0', 'argcomplete >= 1.1.0', 'paramiko >= 1.16.0', 'requests >= 2.9.1', 'tweak >= 0.3.2', 'keymaker >= 0.1.7', 'pyyaml >= 3.11', 'python-dateutil >= 2.1' ], extras_require={ ':python_version == "2.7"': [ 'enum34 >= 1.0.4', 'ipaddress >= 1.0.16', 'backports.statistics >= 0.1.0' ] }, packages=find_packages(exclude=['test']), scripts=glob.glob('scripts/*'), platforms=['MacOS X', 'Posix'], test_suite='test', include_package_data=True )
Use git describe output for version #!/usr/bin/env python import os, sys, glob from setuptools import setup, find_packages setup( name='aegea', version='0.6.0', url='https://github.com/kislyuk/aegea', license=open('LICENSE.md').readline().strip(), author='Andrey Kislyuk', author_email='[email protected]', description='Amazon Web Services Operator Interface', long_description=open('README.rst').read(), install_requires=[ 'setuptools', 'boto3 >= 1.3.0', 'argcomplete >= 1.1.0', 'paramiko >= 1.16.0', 'requests >= 2.9.1', 'tweak >= 0.3.2', 'keymaker >= 0.1.7', 'pyyaml >= 3.11', 'python-dateutil >= 2.1' ], extras_require={ ':python_version == "2.7"': [ 'enum34 >= 1.0.4', 'ipaddress >= 1.0.16', 'backports.statistics >= 0.1.0' ] }, packages=find_packages(exclude=['test']), scripts=glob.glob('scripts/*'), platforms=['MacOS X', 'Posix'], test_suite='test', include_package_data=True )
7955e777d6ba3bbbd104bd3916f131ab7fa8f8b5
asyncmongo/__init__.py
asyncmongo/__init__.py
#!/bin/env python # # Copyright 2010 bit.ly # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ AsyncMongo is an asynchronous library for accessing mongo http://github.com/bitly/asyncmongo """ try: import bson except ImportError: raise ImportError("bson library not installed. Install pymongo >= 1.9 https://github.com/mongodb/mongo-python-driver") # also update in setup.py version = "1.3" version_info = (1, 3) ASCENDING = 1 """Ascending sort order.""" DESCENDING = -1 """Descending sort order.""" GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`""" from errors import (Error, InterfaceError, AuthenticationError, DatabaseError, RSConnectionError, DataError, IntegrityError, ProgrammingError, NotSupportedError) from client import Client
#!/bin/env python # # Copyright 2010 bit.ly # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ AsyncMongo is an asynchronous library for accessing mongo http://github.com/bitly/asyncmongo """ try: import bson except ImportError: raise ImportError("bson library not installed. Install pymongo >= 1.9 https://github.com/mongodb/mongo-python-driver") # also update in setup.py version = "1.3" version_info = (1, 3) ASCENDING = 1 """Ascending sort order.""" DESCENDING = -1 """Descending sort order.""" GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`""" TEXT = '{ $meta: "textScore" }' """TEXT Index sort order.""" from errors import (Error, InterfaceError, AuthenticationError, DatabaseError, RSConnectionError, DataError, IntegrityError, ProgrammingError, NotSupportedError) from client import Client
Support Sort Order For TEXT Index
Support Sort Order For TEXT Index
Python
apache-2.0
RealGeeks/asyncmongo
#!/bin/env python # # Copyright 2010 bit.ly # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ AsyncMongo is an asynchronous library for accessing mongo http://github.com/bitly/asyncmongo """ try: import bson except ImportError: raise ImportError("bson library not installed. Install pymongo >= 1.9 https://github.com/mongodb/mongo-python-driver") # also update in setup.py version = "1.3" version_info = (1, 3) ASCENDING = 1 """Ascending sort order.""" DESCENDING = -1 """Descending sort order.""" GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`""" TEXT = '{ $meta: "textScore" }' """TEXT Index sort order.""" from errors import (Error, InterfaceError, AuthenticationError, DatabaseError, RSConnectionError, DataError, IntegrityError, ProgrammingError, NotSupportedError) from client import Client
Support Sort Order For TEXT Index #!/bin/env python # # Copyright 2010 bit.ly # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ AsyncMongo is an asynchronous library for accessing mongo http://github.com/bitly/asyncmongo """ try: import bson except ImportError: raise ImportError("bson library not installed. Install pymongo >= 1.9 https://github.com/mongodb/mongo-python-driver") # also update in setup.py version = "1.3" version_info = (1, 3) ASCENDING = 1 """Ascending sort order.""" DESCENDING = -1 """Descending sort order.""" GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`""" from errors import (Error, InterfaceError, AuthenticationError, DatabaseError, RSConnectionError, DataError, IntegrityError, ProgrammingError, NotSupportedError) from client import Client
dc4307a781e34bf051b20b18935d4939b2de1f8e
examples/unicode_commands.py
examples/unicode_commands.py
#!/usr/bin/env python # coding=utf-8 """A simple example demonstrating support for unicode command names. """ import math import cmd2 class UnicodeApp(cmd2.Cmd): """Example cmd2 application with unicode command names.""" def __init__(self): super().__init__() self.intro = 'Welcome the Unicode example app. Note the full Unicode support: 😇 💩' def do_𝛑print(self, _): """This command prints 𝛑 to 5 decimal places.""" self.poutput("𝛑 = {0:.6}".format(math.pi)) def do_你好(self, arg): """This command says hello in Chinese (Mandarin).""" self.poutput("你好 " + arg) if __name__ == '__main__': app = UnicodeApp() app.cmdloop()
#!/usr/bin/env python # coding=utf-8 """A simple example demonstrating support for unicode command names. """ import math import cmd2 class UnicodeApp(cmd2.Cmd): """Example cmd2 application with unicode command names.""" def __init__(self): super().__init__() self.intro = 'Welcome the Unicode example app. Note the full Unicode support: 😇 💩' def do_𝛑print(self, _): """This command prints 𝛑 to 5 decimal places.""" self.poutput("𝛑 = {0:.6}".format(math.pi)) def do_你好(self, arg): """This command says hello in Chinese (Mandarin).""" self.poutput("你好 " + arg) if __name__ == '__main__': app = UnicodeApp() app.cmdloop()
Fix flake8 error due to extra blank line in example
Fix flake8 error due to extra blank line in example
Python
mit
python-cmd2/cmd2,python-cmd2/cmd2
#!/usr/bin/env python # coding=utf-8 """A simple example demonstrating support for unicode command names. """ import math import cmd2 class UnicodeApp(cmd2.Cmd): """Example cmd2 application with unicode command names.""" def __init__(self): super().__init__() self.intro = 'Welcome the Unicode example app. Note the full Unicode support: 😇 💩' def do_𝛑print(self, _): """This command prints 𝛑 to 5 decimal places.""" self.poutput("𝛑 = {0:.6}".format(math.pi)) def do_你好(self, arg): """This command says hello in Chinese (Mandarin).""" self.poutput("你好 " + arg) if __name__ == '__main__': app = UnicodeApp() app.cmdloop()
Fix flake8 error due to extra blank line in example #!/usr/bin/env python # coding=utf-8 """A simple example demonstrating support for unicode command names. """ import math import cmd2 class UnicodeApp(cmd2.Cmd): """Example cmd2 application with unicode command names.""" def __init__(self): super().__init__() self.intro = 'Welcome the Unicode example app. Note the full Unicode support: 😇 💩' def do_𝛑print(self, _): """This command prints 𝛑 to 5 decimal places.""" self.poutput("𝛑 = {0:.6}".format(math.pi)) def do_你好(self, arg): """This command says hello in Chinese (Mandarin).""" self.poutput("你好 " + arg) if __name__ == '__main__': app = UnicodeApp() app.cmdloop()
f22945907bafb189645800db1e9ca804104b06db
setup.py
setup.py
""" The setup package to install TensorPy dependencies *> This does NOT include TensorFlow installation *> To install TensorFlow, use "./install_tensorflow.sh" """ from setuptools import setup, find_packages # noqa setup( name='tensorpy', version='1.0.1', url='http://tensorpy.com', author='Michael Mintz', author_email='@mintzworld', maintainer='Michael Mintz', description='The fast & easy way to get started with Tensorflow', license='The MIT License', install_requires=[ 'requests==2.11.1', 'six>=1.10.0', 'Pillow==3.4.1', 'BeautifulSoup==3.2.1', ], packages=['tensorpy'], )
""" The setup package to install TensorPy dependencies *> This does NOT include TensorFlow installation *> To install TensorFlow, use "./install_tensorflow.sh" """ from setuptools import setup, find_packages # noqa setup( name='tensorpy', version='1.0.1', url='http://tensorpy.com', author='Michael Mintz', author_email='@mintzworld', maintainer='Michael Mintz', description='The fast & easy way to get started with Tensorflow', license='The MIT License', install_requires=[ 'requests==2.11.1', 'six==1.10.0', 'Pillow==3.4.1', 'BeautifulSoup==3.2.1', ], packages=['tensorpy'], )
Update the "six" to force version 1.10.0
Update the "six" to force version 1.10.0
Python
mit
TensorPy/TensorPy,TensorPy/TensorPy
""" The setup package to install TensorPy dependencies *> This does NOT include TensorFlow installation *> To install TensorFlow, use "./install_tensorflow.sh" """ from setuptools import setup, find_packages # noqa setup( name='tensorpy', version='1.0.1', url='http://tensorpy.com', author='Michael Mintz', author_email='@mintzworld', maintainer='Michael Mintz', description='The fast & easy way to get started with Tensorflow', license='The MIT License', install_requires=[ 'requests==2.11.1', 'six==1.10.0', 'Pillow==3.4.1', 'BeautifulSoup==3.2.1', ], packages=['tensorpy'], )
Update the "six" to force version 1.10.0 """ The setup package to install TensorPy dependencies *> This does NOT include TensorFlow installation *> To install TensorFlow, use "./install_tensorflow.sh" """ from setuptools import setup, find_packages # noqa setup( name='tensorpy', version='1.0.1', url='http://tensorpy.com', author='Michael Mintz', author_email='@mintzworld', maintainer='Michael Mintz', description='The fast & easy way to get started with Tensorflow', license='The MIT License', install_requires=[ 'requests==2.11.1', 'six>=1.10.0', 'Pillow==3.4.1', 'BeautifulSoup==3.2.1', ], packages=['tensorpy'], )
490ce27b6e9213cd9200b6fb42e7676af58abd58
zou/app/models/custom_action.py
zou/app/models/custom_action.py
from zou.app import db from zou.app.models.serializer import SerializerMixin from zou.app.models.base import BaseMixin class CustomAction(db.Model, BaseMixin, SerializerMixin): name = db.Column(db.String(80), nullable=False) url = db.Column(db.String(400))
from zou.app import db from zou.app.models.serializer import SerializerMixin from zou.app.models.base import BaseMixin class CustomAction(db.Model, BaseMixin, SerializerMixin): name = db.Column(db.String(80), nullable=False) url = db.Column(db.String(400)) entity_type = db.Column(db.String(40), default="all")
Add entity type column to actions
Add entity type column to actions
Python
agpl-3.0
cgwire/zou
from zou.app import db from zou.app.models.serializer import SerializerMixin from zou.app.models.base import BaseMixin class CustomAction(db.Model, BaseMixin, SerializerMixin): name = db.Column(db.String(80), nullable=False) url = db.Column(db.String(400)) entity_type = db.Column(db.String(40), default="all")
Add entity type column to actions from zou.app import db from zou.app.models.serializer import SerializerMixin from zou.app.models.base import BaseMixin class CustomAction(db.Model, BaseMixin, SerializerMixin): name = db.Column(db.String(80), nullable=False) url = db.Column(db.String(400))
254ef4c3a433bebd8a668f5516d2f2ac707e2943
isUnique.py
isUnique.py
def verifyUnique(string): if len(string) > 128: return False characterHash = [0] * 128 for character in string: hashKey = ord(character)%128 if(characterHash[hashKey] > 0): return False else: characterHash[hashKey] = characterHash[hashKey]+1 return True print verifyUnique('test') # False ,O(n) print verifyUnique('aquickboASDFwnfxjmps><verthlzydg') # True ,O(n)
Verify the given string has unique characters
Verify the given string has unique characters
Python
mit
arunkumarpalaniappan/algorithm_tryouts
def verifyUnique(string): if len(string) > 128: return False characterHash = [0] * 128 for character in string: hashKey = ord(character)%128 if(characterHash[hashKey] > 0): return False else: characterHash[hashKey] = characterHash[hashKey]+1 return True print verifyUnique('test') # False ,O(n) print verifyUnique('aquickboASDFwnfxjmps><verthlzydg') # True ,O(n)
Verify the given string has unique characters
89d9a8a7d6eb5e982d1728433ea2a9dfbd9d1259
setup.py
setup.py
#!/usr/bin/env python from setuptools import setup setup(name = 'i2py', version = '0.2', description = 'Tools to work with i2p.', author = 'contributors.txt', author_email = 'Anonymous', classifiers = [ 'Development Status :: 3 - Alpha', #'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Intended Audience :: Developers', 'Topic :: Utilities', ], install_requires = [ # If you plan on adding something, make it known why. # Let's try to keep the dependencies minimal, okay? 'bunch', # Needed by i2py.control.pyjsonrpc. 'python-geoip', # Needed by i2py.netdb. 'python-geoip-geolite2', # Needed by i2py.netdb. ], tests_require=['pytest'], url = 'https://github.com/chris-barry/i2py', packages = ['i2py', 'i2py.netdb', 'i2py.control', 'i2py.control.pyjsonrpc'], )
#!/usr/bin/env python from setuptools import setup setup(name = 'i2py', version = '0.3', description = 'Tools to work with i2p.', author = 'See contributors.txt', author_email = 'Anonymous', classifiers = [ 'Development Status :: 3 - Alpha', #'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Intended Audience :: Developers', 'Topic :: Utilities', ], install_requires = [ # If you plan on adding something, make it known why. # Let's try to keep the dependencies minimal, okay? 'bunch', # Needed by i2py.control.pyjsonrpc. 'python-geoip', # Needed by i2py.netdb. 'python-geoip-geolite2', # Needed by i2py.netdb. ], tests_require=['pytest'], url = 'https://github.com/chris-barry/i2py', packages = ['i2py', 'i2py.netdb', 'i2py.control', 'i2py.control.pyjsonrpc'], )
Change version to 0.3 due to functions changing name.
Change version to 0.3 due to functions changing name.
Python
mit
chris-barry/i2py
#!/usr/bin/env python from setuptools import setup setup(name = 'i2py', version = '0.3', description = 'Tools to work with i2p.', author = 'See contributors.txt', author_email = 'Anonymous', classifiers = [ 'Development Status :: 3 - Alpha', #'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Intended Audience :: Developers', 'Topic :: Utilities', ], install_requires = [ # If you plan on adding something, make it known why. # Let's try to keep the dependencies minimal, okay? 'bunch', # Needed by i2py.control.pyjsonrpc. 'python-geoip', # Needed by i2py.netdb. 'python-geoip-geolite2', # Needed by i2py.netdb. ], tests_require=['pytest'], url = 'https://github.com/chris-barry/i2py', packages = ['i2py', 'i2py.netdb', 'i2py.control', 'i2py.control.pyjsonrpc'], )
Change version to 0.3 due to functions changing name. #!/usr/bin/env python from setuptools import setup setup(name = 'i2py', version = '0.2', description = 'Tools to work with i2p.', author = 'contributors.txt', author_email = 'Anonymous', classifiers = [ 'Development Status :: 3 - Alpha', #'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Intended Audience :: Developers', 'Topic :: Utilities', ], install_requires = [ # If you plan on adding something, make it known why. # Let's try to keep the dependencies minimal, okay? 'bunch', # Needed by i2py.control.pyjsonrpc. 'python-geoip', # Needed by i2py.netdb. 'python-geoip-geolite2', # Needed by i2py.netdb. ], tests_require=['pytest'], url = 'https://github.com/chris-barry/i2py', packages = ['i2py', 'i2py.netdb', 'i2py.control', 'i2py.control.pyjsonrpc'], )
cdc40da26edfcb00a1da3125a925232fc947d143
test_fractal.py
test_fractal.py
#!/usr/bin/env py.test # -*- coding: utf-8 -*- # Created on Fri Apr 25 02:33:04 2014 # License is MIT, see COPYING.txt for more details. # @author: Danilo de Jesus da Silva Bellini import os, re, pytest from fractal import generate_fractal, call_kw, cli_parse_args from io import BytesIO from pylab import imread, imsave def show_parameters(fname): """ String with CLI args to show the fractal with the given ``fname`` """ re_complex = re.compile("(?:([+-]?\s*[0-9.]+))?\s*" "(?:([+-]\s*[0-9.]+)\s*)?(.*)") def part_generator(): for part in fname.rsplit(".", 1)[0].split("_"): if "=" in part: yield "--" + part else: yield " ".join(filter(lambda x: x, re_complex.match(part).groups())) yield "--show" return " ".join(part_generator()) def to_dict_params(fname): """ Get full kwargs from file name """ return cli_parse_args(show_parameters(fname).split()) @pytest.mark.parametrize("fname", os.listdir("images")) def test_file_image(fname): ext = os.path.splitext(fname)[-1][len(os.path.extsep):] kwargs = to_dict_params(fname) # Creates the image in memory mem = BytesIO() fractal_data = call_kw(generate_fractal, kwargs) imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext) mem.seek(0) # Return stream psition back for reading # Comparison pixel-by-pixel img_file = imread("images/" + fname) img_mem = imread(mem, format=ext) assert img_file.tolist() == img_mem.tolist()
Test with each example image in the repository
Test with each example image in the repository
Python
mit
danilobellini/fractals
#!/usr/bin/env py.test # -*- coding: utf-8 -*- # Created on Fri Apr 25 02:33:04 2014 # License is MIT, see COPYING.txt for more details. # @author: Danilo de Jesus da Silva Bellini import os, re, pytest from fractal import generate_fractal, call_kw, cli_parse_args from io import BytesIO from pylab import imread, imsave def show_parameters(fname): """ String with CLI args to show the fractal with the given ``fname`` """ re_complex = re.compile("(?:([+-]?\s*[0-9.]+))?\s*" "(?:([+-]\s*[0-9.]+)\s*)?(.*)") def part_generator(): for part in fname.rsplit(".", 1)[0].split("_"): if "=" in part: yield "--" + part else: yield " ".join(filter(lambda x: x, re_complex.match(part).groups())) yield "--show" return " ".join(part_generator()) def to_dict_params(fname): """ Get full kwargs from file name """ return cli_parse_args(show_parameters(fname).split()) @pytest.mark.parametrize("fname", os.listdir("images")) def test_file_image(fname): ext = os.path.splitext(fname)[-1][len(os.path.extsep):] kwargs = to_dict_params(fname) # Creates the image in memory mem = BytesIO() fractal_data = call_kw(generate_fractal, kwargs) imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext) mem.seek(0) # Return stream psition back for reading # Comparison pixel-by-pixel img_file = imread("images/" + fname) img_mem = imread(mem, format=ext) assert img_file.tolist() == img_mem.tolist()
Test with each example image in the repository
05ba498867ff16c4221dcd758d5cdef9ee884b27
modules/test_gitdata.py
modules/test_gitdata.py
from nose import with_setup from nose.tools import * import os import sys from gitdata import GitData import simplejson as json def test_fetch(): gd = GitData(repo="./treenexus") study_id = 438 study_nexson = gd.fetch_study(study_id) valid = 1 try: json.loads(study_nexson) except: valid = 0 assert valid, "fetch_study(%s) returned valid JSON" % study_id def test_write(): gd = GitData(repo="./treenexus") author = "John Doe <[email protected]>" content = '{"foo":"bar"}' study_id = 999 branch = "johndoe_study_%s" % study_id new_sha = gd.write_study(study_id,content,branch,author) assert new_sha != "", "new_sha is non-empty" def test_branch_exists(): gd = GitData(repo="./treenexus") exists = gd.branch_exists("nothisdoesnotexist") assert exists == 0, "branch does not exist" exists = gd.branch_exists("master") assert exists, "master branch exists" test_branch_exists() test_fetch() test_write()
import unittest import os import sys from gitdata import GitData import simplejson as json class TestGitData(unittest.TestCase): def test_fetch(self): gd = GitData(repo="./treenexus") study_id = 438 study_nexson = gd.fetch_study(study_id) valid = 1 try: json.loads(study_nexson) except: valid = 0 self.assertTrue( valid, "fetch_study(%s) returned valid JSON" % study_id) def test_write(self): gd = GitData(repo="./treenexus") author = "John Doe <[email protected]>" content = '{"foo":"bar"}' study_id = 9999 branch = "johndoe_study_%s" % study_id new_sha = gd.write_study(study_id,content,branch,author) self.assertTrue( new_sha != "", "new_sha is non-empty") def test_branch_exists(self): gd = GitData(repo="./treenexus") exists = gd.branch_exists("nothisdoesnotexist") self.assertTrue( exists == 0, "branch does not exist") exists = gd.branch_exists("master") self.assertTrue( exists, "master branch exists") def suite(): loader = unittest.TestLoader() testsuite = loader.loadTestsFromTestCase(TestGitData) return testsuite def test_main(): testsuite = suite() runner = unittest.TextTestRunner(sys.stdout, verbosity=2) result = runner.run(testsuite) if __name__ == "__main__": test_main()
Convert GitData tests to a unittest suite
Convert GitData tests to a unittest suite
Python
bsd-2-clause
OpenTreeOfLife/phylesystem-api,OpenTreeOfLife/phylesystem-api,OpenTreeOfLife/phylesystem-api
import unittest import os import sys from gitdata import GitData import simplejson as json class TestGitData(unittest.TestCase): def test_fetch(self): gd = GitData(repo="./treenexus") study_id = 438 study_nexson = gd.fetch_study(study_id) valid = 1 try: json.loads(study_nexson) except: valid = 0 self.assertTrue( valid, "fetch_study(%s) returned valid JSON" % study_id) def test_write(self): gd = GitData(repo="./treenexus") author = "John Doe <[email protected]>" content = '{"foo":"bar"}' study_id = 9999 branch = "johndoe_study_%s" % study_id new_sha = gd.write_study(study_id,content,branch,author) self.assertTrue( new_sha != "", "new_sha is non-empty") def test_branch_exists(self): gd = GitData(repo="./treenexus") exists = gd.branch_exists("nothisdoesnotexist") self.assertTrue( exists == 0, "branch does not exist") exists = gd.branch_exists("master") self.assertTrue( exists, "master branch exists") def suite(): loader = unittest.TestLoader() testsuite = loader.loadTestsFromTestCase(TestGitData) return testsuite def test_main(): testsuite = suite() runner = unittest.TextTestRunner(sys.stdout, verbosity=2) result = runner.run(testsuite) if __name__ == "__main__": test_main()
Convert GitData tests to a unittest suite from nose import with_setup from nose.tools import * import os import sys from gitdata import GitData import simplejson as json def test_fetch(): gd = GitData(repo="./treenexus") study_id = 438 study_nexson = gd.fetch_study(study_id) valid = 1 try: json.loads(study_nexson) except: valid = 0 assert valid, "fetch_study(%s) returned valid JSON" % study_id def test_write(): gd = GitData(repo="./treenexus") author = "John Doe <[email protected]>" content = '{"foo":"bar"}' study_id = 999 branch = "johndoe_study_%s" % study_id new_sha = gd.write_study(study_id,content,branch,author) assert new_sha != "", "new_sha is non-empty" def test_branch_exists(): gd = GitData(repo="./treenexus") exists = gd.branch_exists("nothisdoesnotexist") assert exists == 0, "branch does not exist" exists = gd.branch_exists("master") assert exists, "master branch exists" test_branch_exists() test_fetch() test_write()
c61b08475d82d57dae4349e1c4aa3e58fd7d8256
src/sentry/api/serializers/models/grouptagvalue.py
src/sentry/api/serializers/models/grouptagvalue.py
from __future__ import absolute_import from sentry.api.serializers import Serializer, register from sentry.models import GroupTagValue @register(GroupTagValue) class GroupTagValueSerializer(Serializer): def serialize(self, obj, attrs, user): d = { 'key': obj.key, 'value': obj.value, 'count': obj.times_seen, 'lastSeen': obj.last_seen, 'firstSeen': obj.first_seen, } return d
from __future__ import absolute_import from sentry.api.serializers import Serializer, register from sentry.models import GroupTagValue, TagValue @register(GroupTagValue) class GroupTagValueSerializer(Serializer): def get_attrs(self, item_list, user): assert len(set(i.key for i in item_list)) < 2 tagvalues = dict( (t.value, t) for t in TagValue.objects.filter( project=item_list[0].project, key=item_list[0].key, value__in=[i.value for i in item_list] ) ) result = {} for item in item_list: result[item] = { 'name': tagvalues[item.value].get_label(), } return result def serialize(self, obj, attrs, user): d = { 'name': attrs['name'], 'key': obj.key, 'value': obj.value, 'count': obj.times_seen, 'lastSeen': obj.last_seen, 'firstSeen': obj.first_seen, } return d
Implement labels on group tag values
Implement labels on group tag values
Python
bsd-3-clause
ngonzalvez/sentry,mvaled/sentry,daevaorn/sentry,kevinlondon/sentry,imankulov/sentry,gencer/sentry,korealerts1/sentry,Natim/sentry,felixbuenemann/sentry,JamesMura/sentry,JackDanger/sentry,fotinakis/sentry,JackDanger/sentry,jean/sentry,looker/sentry,gencer/sentry,looker/sentry,gencer/sentry,looker/sentry,ngonzalvez/sentry,looker/sentry,fotinakis/sentry,BuildingLink/sentry,gencer/sentry,hongliang5623/sentry,Kryz/sentry,zenefits/sentry,BayanGroup/sentry,JamesMura/sentry,wong2/sentry,JamesMura/sentry,jean/sentry,fuziontech/sentry,zenefits/sentry,JackDanger/sentry,nicholasserra/sentry,JamesMura/sentry,fotinakis/sentry,korealerts1/sentry,imankulov/sentry,daevaorn/sentry,mitsuhiko/sentry,ifduyue/sentry,mvaled/sentry,Natim/sentry,beeftornado/sentry,alexm92/sentry,nicholasserra/sentry,zenefits/sentry,BuildingLink/sentry,mitsuhiko/sentry,korealerts1/sentry,ifduyue/sentry,Kryz/sentry,hongliang5623/sentry,ifduyue/sentry,gencer/sentry,BayanGroup/sentry,Kryz/sentry,alexm92/sentry,Natim/sentry,mvaled/sentry,wong2/sentry,BayanGroup/sentry,ifduyue/sentry,BuildingLink/sentry,ifduyue/sentry,felixbuenemann/sentry,alexm92/sentry,daevaorn/sentry,BuildingLink/sentry,songyi199111/sentry,mvaled/sentry,wong2/sentry,fuziontech/sentry,ngonzalvez/sentry,looker/sentry,JamesMura/sentry,songyi199111/sentry,jean/sentry,nicholasserra/sentry,mvaled/sentry,BuildingLink/sentry,songyi199111/sentry,fotinakis/sentry,jean/sentry,hongliang5623/sentry,felixbuenemann/sentry,kevinlondon/sentry,mvaled/sentry,beeftornado/sentry,daevaorn/sentry,zenefits/sentry,fuziontech/sentry,imankulov/sentry,zenefits/sentry,kevinlondon/sentry,beeftornado/sentry,jean/sentry
from __future__ import absolute_import

from sentry.api.serializers import Serializer, register
from sentry.models import GroupTagValue, TagValue


@register(GroupTagValue)
class GroupTagValueSerializer(Serializer):
    def get_attrs(self, item_list, user):
        assert len(set(i.key for i in item_list)) < 2

        tagvalues = dict(
            (t.value, t)
            for t in TagValue.objects.filter(
                project=item_list[0].project,
                key=item_list[0].key,
                value__in=[i.value for i in item_list]
            )
        )

        result = {}
        for item in item_list:
            result[item] = {
                'name': tagvalues[item.value].get_label(),
            }
        return result

    def serialize(self, obj, attrs, user):
        d = {
            'name': attrs['name'],
            'key': obj.key,
            'value': obj.value,
            'count': obj.times_seen,
            'lastSeen': obj.last_seen,
            'firstSeen': obj.first_seen,
        }
        return d
Implement labels on group tag values
from __future__ import absolute_import

from sentry.api.serializers import Serializer, register
from sentry.models import GroupTagValue


@register(GroupTagValue)
class GroupTagValueSerializer(Serializer):
    def serialize(self, obj, attrs, user):
        d = {
            'key': obj.key,
            'value': obj.value,
            'count': obj.times_seen,
            'lastSeen': obj.last_seen,
            'firstSeen': obj.first_seen,
        }
        return d
6aa8f148b3b3975363d5d4a763f5abb45ea6cbd8
databin/parsers/__init__.py
databin/parsers/__init__.py
from databin.parsers.util import ParseException
from databin.parsers.simple import parse_csv, parse_tsv
from databin.parsers.psql import parse_psql

PARSERS = [
    ('Comma-Separated Values', 'csv', parse_csv),
    ('Tab-Separated Values', 'tsv', parse_tsv),
    ('Excel copy & paste', 'excel', parse_tsv),
    ('psql Shell', 'psql', parse_psql),
]

def parse(format, data):
    for name, key, func in PARSERS:
        if key == format:
            return func(data)
    raise ParseException()

def get_parsers():
    for name, key, func in PARSERS:
        yield (key, name)
from databin.parsers.util import ParseException
from databin.parsers.simple import parse_csv, parse_tsv
from databin.parsers.psql import parse_psql

PARSERS = [
    ('Excel copy & paste', 'excel', parse_tsv),
    ('Comma-Separated Values', 'csv', parse_csv),
    ('Tab-Separated Values', 'tsv', parse_tsv),
    ('psql Shell', 'psql', parse_psql),
]

def parse(format, data):
    for name, key, func in PARSERS:
        if key == format:
            return func(data)
    raise ParseException()

def get_parsers():
    for name, key, func in PARSERS:
        yield (key, name)
Make excel format the default
Make excel format the default
Python
mit
LeTristanB/Pastable,pudo/databin,LeTristanB/Pastable
from databin.parsers.util import ParseException
from databin.parsers.simple import parse_csv, parse_tsv
from databin.parsers.psql import parse_psql

PARSERS = [
    ('Excel copy & paste', 'excel', parse_tsv),
    ('Comma-Separated Values', 'csv', parse_csv),
    ('Tab-Separated Values', 'tsv', parse_tsv),
    ('psql Shell', 'psql', parse_psql),
]

def parse(format, data):
    for name, key, func in PARSERS:
        if key == format:
            return func(data)
    raise ParseException()

def get_parsers():
    for name, key, func in PARSERS:
        yield (key, name)
Make excel format the default
from databin.parsers.util import ParseException
from databin.parsers.simple import parse_csv, parse_tsv
from databin.parsers.psql import parse_psql

PARSERS = [
    ('Comma-Separated Values', 'csv', parse_csv),
    ('Tab-Separated Values', 'tsv', parse_tsv),
    ('Excel copy & paste', 'excel', parse_tsv),
    ('psql Shell', 'psql', parse_psql),
]

def parse(format, data):
    for name, key, func in PARSERS:
        if key == format:
            return func(data)
    raise ParseException()

def get_parsers():
    for name, key, func in PARSERS:
        yield (key, name)
8266b46f8710e48cf93778a90cc0c82f4f9dcbe8
l10n_br_nfe/models/__init__.py
l10n_br_nfe/models/__init__.py
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from . import res_country_state
from . import res_partner
from . import res_company
from . import product_product
from . import document_related
from . import document
from . import document_line
from . import res_city
from . import res_config_settings
from . import cfop
from . import document_cancel
from . import document_correction
from . import document_invalidate_number
from . import spec_mixin
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from . import res_country_state
from . import res_partner
from . import res_company
from . import product_product
from . import document_related
from . import document
from . import document_line
from . import res_city
from . import res_config_settings
from . import cfop
# from . import document_cancel
# from . import document_correction
from . import document_invalidate_number
from . import spec_mixin
Disable import of document_cancel and document_correction
[REF] Disable import of document_cancel and document_correction
Python
agpl-3.0
OCA/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from . import res_country_state
from . import res_partner
from . import res_company
from . import product_product
from . import document_related
from . import document
from . import document_line
from . import res_city
from . import res_config_settings
from . import cfop
# from . import document_cancel
# from . import document_correction
from . import document_invalidate_number
from . import spec_mixin
[REF] Disable import of document_cancel and document_correction
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from . import res_country_state
from . import res_partner
from . import res_company
from . import product_product
from . import document_related
from . import document
from . import document_line
from . import res_city
from . import res_config_settings
from . import cfop
from . import document_cancel
from . import document_correction
from . import document_invalidate_number
from . import spec_mixin
5bc3e6a3fb112b529f738142850860dd98a9d428
tests/runtests.py
tests/runtests.py
import glob
import os
import unittest

def build_test_suite():
    suite = unittest.TestSuite()
    for test_case in glob.glob('tests/test_*.py'):
        modname = os.path.splitext(test_case)[0]
        modname = modname.replace('/', '.')
        module = __import__(modname, {}, {}, ['1'])
        suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
    return suite

if __name__ == "__main__":
    suite = build_test_suite()
    runner = unittest.TextTestRunner()
    runner.run(suite)
import glob
import os
import unittest
import sys

def build_test_suite():
    suite = unittest.TestSuite()
    for test_case in glob.glob('tests/test_*.py'):
        modname = os.path.splitext(test_case)[0]
        modname = modname.replace('/', '.')
        module = __import__(modname, {}, {}, ['1'])
        suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
    return suite

if __name__ == "__main__":
    suite = build_test_suite()
    runner = unittest.TextTestRunner()
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())
Make unittest return exit code 1 on failure
Make unittest return exit code 1 on failure

This is to allow travis to catch test failures
Python
bsd-3-clause
jorgecarleitao/pyglet-gui
import glob
import os
import unittest
import sys

def build_test_suite():
    suite = unittest.TestSuite()
    for test_case in glob.glob('tests/test_*.py'):
        modname = os.path.splitext(test_case)[0]
        modname = modname.replace('/', '.')
        module = __import__(modname, {}, {}, ['1'])
        suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
    return suite

if __name__ == "__main__":
    suite = build_test_suite()
    runner = unittest.TextTestRunner()
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())
Make unittest return exit code 1 on failure

This is to allow travis to catch test failures
import glob
import os
import unittest

def build_test_suite():
    suite = unittest.TestSuite()
    for test_case in glob.glob('tests/test_*.py'):
        modname = os.path.splitext(test_case)[0]
        modname = modname.replace('/', '.')
        module = __import__(modname, {}, {}, ['1'])
        suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
    return suite

if __name__ == "__main__":
    suite = build_test_suite()
    runner = unittest.TextTestRunner()
    runner.run(suite)
3af9e49d36aedd08d075c4aae027b7d7565d4579
src/redisboard/views.py
src/redisboard/views.py
from django.shortcuts import render

def _get_key_details(conn, db):
    conn.execute_command('SELECT', db)
    keys = conn.keys()
    key_details = {}
    for key in keys:
        details = conn.execute_command('DEBUG', 'OBJECT', key)
        key_details[key] = {
            'type': conn.type(key),
            'details': dict(
                i.split(':') for i in details.split() if ':' in i
            ),
            'ttl': conn.ttl(key),
        }
    return key_details

def inspect(request, server):
    stats = server.stats
    if stats['status'] == 'UP':
        conn = server.connection
        databases = [name[2:] for name in conn.info() if name.startswith('db')]
        database_details = {}
        for db in databases:
            database_details[db] = _get_key_details(conn, db)
    else:
        database_details = {}

    return render(request, "redisboard/inspect.html", {
        'databases': database_details,
        'original': server,
        'stats': stats,
        'app_label': 'redisboard',
    })
from django.shortcuts import render
from django.utils.datastructures import SortedDict

def _get_key_details(conn, db):
    conn.execute_command('SELECT', db)
    keys = conn.keys()
    key_details = {}
    for key in keys:
        details = conn.execute_command('DEBUG', 'OBJECT', key)
        key_details[key] = {
            'type': conn.type(key),
            'details': dict(
                i.split(':') for i in details.split() if ':' in i
            ),
            'ttl': conn.ttl(key),
        }
    return key_details

def inspect(request, server):
    stats = server.stats
    if stats['status'] == 'UP':
        conn = server.connection
        databases = sorted(name[2:] for name in conn.info() if name.startswith('db'))
        database_details = SortedDict()
        for db in databases:
            database_details[db] = _get_key_details(conn, db)
    else:
        database_details = {}

    return render(request, "redisboard/inspect.html", {
        'databases': database_details,
        'original': server,
        'stats': stats,
        'app_label': 'redisboard',
    })
Sort the database order in the inspect page.
Sort the database order in the inspect page.
Python
bsd-2-clause
ionelmc/django-redisboard,jolks/django-redisboard,jolks/django-redisboard,artscoop/django-redisboard,artscoop/django-redisboard,ionelmc/django-redisboard,jolks/django-redisboard,artscoop/django-redisboard
from django.shortcuts import render
from django.utils.datastructures import SortedDict

def _get_key_details(conn, db):
    conn.execute_command('SELECT', db)
    keys = conn.keys()
    key_details = {}
    for key in keys:
        details = conn.execute_command('DEBUG', 'OBJECT', key)
        key_details[key] = {
            'type': conn.type(key),
            'details': dict(
                i.split(':') for i in details.split() if ':' in i
            ),
            'ttl': conn.ttl(key),
        }
    return key_details

def inspect(request, server):
    stats = server.stats
    if stats['status'] == 'UP':
        conn = server.connection
        databases = sorted(name[2:] for name in conn.info() if name.startswith('db'))
        database_details = SortedDict()
        for db in databases:
            database_details[db] = _get_key_details(conn, db)
    else:
        database_details = {}

    return render(request, "redisboard/inspect.html", {
        'databases': database_details,
        'original': server,
        'stats': stats,
        'app_label': 'redisboard',
    })
Sort the database order in the inspect page.
from django.shortcuts import render

def _get_key_details(conn, db):
    conn.execute_command('SELECT', db)
    keys = conn.keys()
    key_details = {}
    for key in keys:
        details = conn.execute_command('DEBUG', 'OBJECT', key)
        key_details[key] = {
            'type': conn.type(key),
            'details': dict(
                i.split(':') for i in details.split() if ':' in i
            ),
            'ttl': conn.ttl(key),
        }
    return key_details

def inspect(request, server):
    stats = server.stats
    if stats['status'] == 'UP':
        conn = server.connection
        databases = [name[2:] for name in conn.info() if name.startswith('db')]
        database_details = {}
        for db in databases:
            database_details[db] = _get_key_details(conn, db)
    else:
        database_details = {}

    return render(request, "redisboard/inspect.html", {
        'databases': database_details,
        'original': server,
        'stats': stats,
        'app_label': 'redisboard',
    })
e8506331cfa5e14029e3de4ccb16c5e0267e85b3
manoseimas/votings/nodes.py
manoseimas/votings/nodes.py
from zope.component import adapts
from zope.component import provideAdapter

from sboard.nodes import CreateView
from sboard.nodes import DetailsView

from .forms import PolicyIssueForm
from .interfaces import IVoting
from .interfaces import IPolicyIssue


class VotingView(DetailsView):
    adapts(IVoting)

    templates = {
        'details': 'votings/voting_details.html',
    }

provideAdapter(VotingView)


class CreatePolicyIssueView(CreateView):
    adapts(object, IPolicyIssue)

    form = PolicyIssueForm

provideAdapter(CreatePolicyIssueView, name="create")
from zope.component import adapts
from zope.component import provideAdapter

from sboard.nodes import CreateView
from sboard.nodes import DetailsView
from sboard.nodes import TagListView

from .forms import PolicyIssueForm
from .interfaces import IVoting
from .interfaces import IPolicyIssue


class VotingView(DetailsView):
    adapts(IVoting)

    templates = {
        'details': 'votings/voting_details.html',
    }

provideAdapter(VotingView)


class CreatePolicyIssueView(CreateView):
    adapts(object, IPolicyIssue)

    form = PolicyIssueForm

provideAdapter(CreatePolicyIssueView, name="create")

provideAdapter(TagListView, (IPolicyIssue,))
Use TagListView for IPolicyIssue as default view.
Use TagListView for IPolicyIssue as default view.
Python
agpl-3.0
ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt
from zope.component import adapts
from zope.component import provideAdapter

from sboard.nodes import CreateView
from sboard.nodes import DetailsView
from sboard.nodes import TagListView

from .forms import PolicyIssueForm
from .interfaces import IVoting
from .interfaces import IPolicyIssue


class VotingView(DetailsView):
    adapts(IVoting)

    templates = {
        'details': 'votings/voting_details.html',
    }

provideAdapter(VotingView)


class CreatePolicyIssueView(CreateView):
    adapts(object, IPolicyIssue)

    form = PolicyIssueForm

provideAdapter(CreatePolicyIssueView, name="create")

provideAdapter(TagListView, (IPolicyIssue,))
Use TagListView for IPolicyIssue as default view.
from zope.component import adapts
from zope.component import provideAdapter

from sboard.nodes import CreateView
from sboard.nodes import DetailsView

from .forms import PolicyIssueForm
from .interfaces import IVoting
from .interfaces import IPolicyIssue


class VotingView(DetailsView):
    adapts(IVoting)

    templates = {
        'details': 'votings/voting_details.html',
    }

provideAdapter(VotingView)


class CreatePolicyIssueView(CreateView):
    adapts(object, IPolicyIssue)

    form = PolicyIssueForm

provideAdapter(CreatePolicyIssueView, name="create")
ec7bbe8ac8715ea22142680f0d880a7d0b71c687
paws/request.py
paws/request.py
from urlparse import parse_qs

from utils import cached_property, MultiDict


class Request(object):
    def __init__(self, event, context):
        self.event = event
        self.context = context

    @property
    def method(self):
        return self.event['httpMethod']

    @property
    def query(self):
        return self.event['queryStringParameters']

    @cached_property
    def post(self):
        return MultiDict(parse_qs(self.event.get('body', '') or ''))

    @property
    def stage(self):
        return self.event['stage']

    @property
    def stageVar(self):
        return self.event['stageVariables']

    @property
    def params(self):
        return self.event['pathParameters']
from Cookie import SimpleCookie
from urlparse import parse_qs

from utils import MultiDict, cached_property


class Request(object):
    def __init__(self, event, context):
        self.event = event
        self.context = context

    @property
    def method(self):
        return self.event['httpMethod']

    @property
    def query(self):
        return self.event['queryStringParameters']

    @cached_property
    def post(self):
        return MultiDict(parse_qs(self.event.get('body', '') or ''))

    @cached_property
    def cookies(self):
        jar = SimpleCookie()
        if self.event['headers'].get('Cookie'):
            jar.load(self.event['headers']['Cookie'].encode('utf-8'))
        return jar

    @property
    def stage(self):
        return self.event['stage']

    @property
    def stageVar(self):
        return self.event['stageVariables']

    @property
    def params(self):
        return self.event['pathParameters']
Add cookies property to Request
Add cookies property to Request
Python
bsd-3-clause
funkybob/paws
from Cookie import SimpleCookie
from urlparse import parse_qs

from utils import MultiDict, cached_property


class Request(object):
    def __init__(self, event, context):
        self.event = event
        self.context = context

    @property
    def method(self):
        return self.event['httpMethod']

    @property
    def query(self):
        return self.event['queryStringParameters']

    @cached_property
    def post(self):
        return MultiDict(parse_qs(self.event.get('body', '') or ''))

    @cached_property
    def cookies(self):
        jar = SimpleCookie()
        if self.event['headers'].get('Cookie'):
            jar.load(self.event['headers']['Cookie'].encode('utf-8'))
        return jar

    @property
    def stage(self):
        return self.event['stage']

    @property
    def stageVar(self):
        return self.event['stageVariables']

    @property
    def params(self):
        return self.event['pathParameters']
Add cookies property to Request
from urlparse import parse_qs

from utils import cached_property, MultiDict


class Request(object):
    def __init__(self, event, context):
        self.event = event
        self.context = context

    @property
    def method(self):
        return self.event['httpMethod']

    @property
    def query(self):
        return self.event['queryStringParameters']

    @cached_property
    def post(self):
        return MultiDict(parse_qs(self.event.get('body', '') or ''))

    @property
    def stage(self):
        return self.event['stage']

    @property
    def stageVar(self):
        return self.event['stageVariables']

    @property
    def params(self):
        return self.event['pathParameters']
c31dea7bb9dc104c23cf6960f61d56af86c8dea6
setup.py
setup.py
#!/usr/bin/env python
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

requirements = [
]

test_requirements = [
]

setup(
    name='adb_android',
    version='0.1.0',
    description="Enables android adb in your python script",
    long_description='This package can be used by everyone who implements some \
android-related stuff on Python and at the same time has to interact with \
android adb. It makes interaction with android adb easier because of proper \
error handling and some useful features.',
    author='Viktor Malyi',
    author_email='[email protected]',
    url='https://github.com/vmalyi/adb',
    packages=[
        'adb_android',
    ],
    package_dir={'adb_android':'adb_android'},
    include_package_data=True,
    install_requires=requirements,
    license="GNU",
    keywords='adb, android',
    #TODO: check compatibitily with >2.7
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Topic :: Software Development :: Testing',
        'Intended Audience :: Developers'
    ],
    test_suite='tests',
)
#!/usr/bin/env python
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

requirements = [
]

test_requirements = [
]

setup(
    name='adb_android',
    version='0.2.0',
    description="Enables android adb in your python script",
    long_description='This python package is a wrapper for standard android adb\
implementation. It allows you to execute android adb commands in your \
python script.',
    author='Viktor Malyi',
    author_email='[email protected]',
    url='https://github.com/vmalyi/adb_android',
    packages=[
        'adb_android',
    ],
    package_dir={'adb_android':'adb_android'},
    include_package_data=True,
    install_requires=requirements,
    license="GNU",
    keywords='adb, android',
    #TODO: check compatibitily with >2.7
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Topic :: Software Development :: Testing',
        'Intended Audience :: Developers'
    ],
    test_suite='tests',
)
Update package description and version
Update package description and version
Python
bsd-3-clause
solarce/adb_android,vmalyi/adb_android
#!/usr/bin/env python
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

requirements = [
]

test_requirements = [
]

setup(
    name='adb_android',
    version='0.2.0',
    description="Enables android adb in your python script",
    long_description='This python package is a wrapper for standard android adb\
implementation. It allows you to execute android adb commands in your \
python script.',
    author='Viktor Malyi',
    author_email='[email protected]',
    url='https://github.com/vmalyi/adb_android',
    packages=[
        'adb_android',
    ],
    package_dir={'adb_android':'adb_android'},
    include_package_data=True,
    install_requires=requirements,
    license="GNU",
    keywords='adb, android',
    #TODO: check compatibitily with >2.7
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Topic :: Software Development :: Testing',
        'Intended Audience :: Developers'
    ],
    test_suite='tests',
)
Update package description and version
#!/usr/bin/env python
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

requirements = [
]

test_requirements = [
]

setup(
    name='adb_android',
    version='0.1.0',
    description="Enables android adb in your python script",
    long_description='This package can be used by everyone who implements some \
android-related stuff on Python and at the same time has to interact with \
android adb. It makes interaction with android adb easier because of proper \
error handling and some useful features.',
    author='Viktor Malyi',
    author_email='[email protected]',
    url='https://github.com/vmalyi/adb',
    packages=[
        'adb_android',
    ],
    package_dir={'adb_android':'adb_android'},
    include_package_data=True,
    install_requires=requirements,
    license="GNU",
    keywords='adb, android',
    #TODO: check compatibitily with >2.7
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Topic :: Software Development :: Testing',
        'Intended Audience :: Developers'
    ],
    test_suite='tests',
)
d5597911837967c1f34d1c904282f9464e38e767
flask_controllers/GameModes.py
flask_controllers/GameModes.py
import logging
from flask import request
from flask.views import MethodView
from flask_helpers.build_response import build_response

# Import the Game Controller
from Game.GameController import GameController


class GameModes(MethodView):
    def get(self):
        logging.debug("GameModes: GET: Initializing GameObject")
        game_object = GameController()
        logging.debug("GameModes: GET: GameObject initialized to {}".format(game_object.save()))

        logging.debug("GameModes: GET: Checking if textmode flag set")
        if request.args.get('textmode', None):
            logging.debug("GameModes: GET: Responding with list of names")
            response_data = game_object.game_mode_names
        else:
            logging.debug("GameModes: GET: Responding with JSON object")
            response_data = [{"mode": gt.mode,
                              "digits": gt.digits,
                              "digit-type": gt.digit_type,
                              "guesses": gt.guesses_allowed
                              } for gt in game_object.game_modes]

        logging.debug("GameModes: GET: Return {}".format(response_data))
        return build_response(
            html_status=200,
            response_data=response_data,
            response_mimetype="application/json"
        )
import logging
from flask import request
from flask.views import MethodView
from flask_helpers.build_response import build_response

# Import the Game Controller
from Game.GameController import GameController


class GameModes(MethodView):
    def get(self):
        logging.debug("GameModes: GET: Initializing GameObject")
        game_object = GameController()
        logging.debug("GameModes: GET: GameObject initialized to {}".format(game_object.save()))

        logging.debug("GameModes: GET: Checking if textmode flag set")
        if request.args.get('textmode', None):
            logging.debug("GameModes: GET: Responding with list of names")
            response_data = game_object.game_mode_names
        else:
            logging.debug("GameModes: GET: Responding with JSON object: {}".format(game_object.game_modes))
            response_data = [{"mode": gt.mode,
                              "digits": gt.digits,
                              "digit-type": gt.digit_type,
                              "guesses": gt.guesses_allowed
                              } for gt in game_object.game_modes]

        logging.debug("GameModes: GET: Return {}".format(response_data))
        return build_response(
            html_status=200,
            response_data=response_data,
            response_mimetype="application/json"
        )
Update logging for message output and consistency.
Update logging for message output and consistency.
Python
apache-2.0
dsandersAzure/python_cowbull_server,dsandersAzure/python_cowbull_server
import logging
from flask import request
from flask.views import MethodView
from flask_helpers.build_response import build_response

# Import the Game Controller
from Game.GameController import GameController


class GameModes(MethodView):
    def get(self):
        logging.debug("GameModes: GET: Initializing GameObject")
        game_object = GameController()
        logging.debug("GameModes: GET: GameObject initialized to {}".format(game_object.save()))

        logging.debug("GameModes: GET: Checking if textmode flag set")
        if request.args.get('textmode', None):
            logging.debug("GameModes: GET: Responding with list of names")
            response_data = game_object.game_mode_names
        else:
            logging.debug("GameModes: GET: Responding with JSON object: {}".format(game_object.game_modes))
            response_data = [{"mode": gt.mode,
                              "digits": gt.digits,
                              "digit-type": gt.digit_type,
                              "guesses": gt.guesses_allowed
                              } for gt in game_object.game_modes]

        logging.debug("GameModes: GET: Return {}".format(response_data))
        return build_response(
            html_status=200,
            response_data=response_data,
            response_mimetype="application/json"
        )
Update logging for message output and consistency.
import logging
from flask import request
from flask.views import MethodView
from flask_helpers.build_response import build_response

# Import the Game Controller
from Game.GameController import GameController


class GameModes(MethodView):
    def get(self):
        logging.debug("GameModes: GET: Initializing GameObject")
        game_object = GameController()
        logging.debug("GameModes: GET: GameObject initialized to {}".format(game_object.save()))

        logging.debug("GameModes: GET: Checking if textmode flag set")
        if request.args.get('textmode', None):
            logging.debug("GameModes: GET: Responding with list of names")
            response_data = game_object.game_mode_names
        else:
            logging.debug("GameModes: GET: Responding with JSON object")
            response_data = [{"mode": gt.mode,
                              "digits": gt.digits,
                              "digit-type": gt.digit_type,
                              "guesses": gt.guesses_allowed
                              } for gt in game_object.game_modes]

        logging.debug("GameModes: GET: Return {}".format(response_data))
        return build_response(
            html_status=200,
            response_data=response_data,
            response_mimetype="application/json"
        )
b97115679929dfe4f69618f756850617f265048f
service/pixelated/config/site.py
service/pixelated/config/site.py
from twisted.web.server import Site, Request


class AddCSPHeaderRequest(Request):
    CSP_HEADER_VALUES = "default-src 'self'; style-src 'self' 'unsafe-inline'"

    def process(self):
        self.setHeader('Content-Security-Policy', self.CSP_HEADER_VALUES)
        self.setHeader('X-Content-Security-Policy', self.CSP_HEADER_VALUES)
        self.setHeader('X-Webkit-CSP', self.CSP_HEADER_VALUES)
        self.setHeader('X-Frame-Options', 'SAMEORIGIN')
        self.setHeader('X-XSS-Protection', '1; mode=block')
        self.setHeader('X-Content-Type-Options', 'nosniff')

        if self.isSecure():
            self.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains')

        Request.process(self)


class PixelatedSite(Site):

    requestFactory = AddCSPHeaderRequest

    @classmethod
    def enable_csp_requests(cls):
        cls.requestFactory = AddCSPHeaderRequest

    @classmethod
    def disable_csp_requests(cls):
        cls.requestFactory = Site.requestFactory
from twisted.web.server import Site, Request


class AddSecurityHeadersRequest(Request):
    CSP_HEADER_VALUES = "default-src 'self'; style-src 'self' 'unsafe-inline'"

    def process(self):
        self.setHeader('Content-Security-Policy', self.CSP_HEADER_VALUES)
        self.setHeader('X-Content-Security-Policy', self.CSP_HEADER_VALUES)
        self.setHeader('X-Webkit-CSP', self.CSP_HEADER_VALUES)
        self.setHeader('X-Frame-Options', 'SAMEORIGIN')
        self.setHeader('X-XSS-Protection', '1; mode=block')
        self.setHeader('X-Content-Type-Options', 'nosniff')

        if self.isSecure():
            self.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains')

        Request.process(self)


class PixelatedSite(Site):

    requestFactory = AddSecurityHeadersRequest

    @classmethod
    def enable_csp_requests(cls):
        cls.requestFactory = AddSecurityHeadersRequest

    @classmethod
    def disable_csp_requests(cls):
        cls.requestFactory = Site.requestFactory
Rename class to match intent
Rename class to match intent
Python
agpl-3.0
pixelated-project/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,pixelated-project/pixelated-user-agent,pixelated-project/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated/pixelated-user-agent
from twisted.web.server import Site, Request


class AddSecurityHeadersRequest(Request):
    CSP_HEADER_VALUES = "default-src 'self'; style-src 'self' 'unsafe-inline'"

    def process(self):
        self.setHeader('Content-Security-Policy', self.CSP_HEADER_VALUES)
        self.setHeader('X-Content-Security-Policy', self.CSP_HEADER_VALUES)
        self.setHeader('X-Webkit-CSP', self.CSP_HEADER_VALUES)
        self.setHeader('X-Frame-Options', 'SAMEORIGIN')
        self.setHeader('X-XSS-Protection', '1; mode=block')
        self.setHeader('X-Content-Type-Options', 'nosniff')

        if self.isSecure():
            self.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains')

        Request.process(self)


class PixelatedSite(Site):

    requestFactory = AddSecurityHeadersRequest

    @classmethod
    def enable_csp_requests(cls):
        cls.requestFactory = AddSecurityHeadersRequest

    @classmethod
    def disable_csp_requests(cls):
        cls.requestFactory = Site.requestFactory
Rename class to match intent
from twisted.web.server import Site, Request


class AddCSPHeaderRequest(Request):
    CSP_HEADER_VALUES = "default-src 'self'; style-src 'self' 'unsafe-inline'"

    def process(self):
        self.setHeader('Content-Security-Policy', self.CSP_HEADER_VALUES)
        self.setHeader('X-Content-Security-Policy', self.CSP_HEADER_VALUES)
        self.setHeader('X-Webkit-CSP', self.CSP_HEADER_VALUES)
        self.setHeader('X-Frame-Options', 'SAMEORIGIN')
        self.setHeader('X-XSS-Protection', '1; mode=block')
        self.setHeader('X-Content-Type-Options', 'nosniff')

        if self.isSecure():
            self.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains')

        Request.process(self)


class PixelatedSite(Site):

    requestFactory = AddCSPHeaderRequest

    @classmethod
    def enable_csp_requests(cls):
        cls.requestFactory = AddCSPHeaderRequest

    @classmethod
    def disable_csp_requests(cls):
        cls.requestFactory = Site.requestFactory
fd8caec8567178abe09abc810f1e96bfc4bb531b
calc.py
calc.py
import sys

def add_all(nums):
    return sum(nums)

def multiply_all(nums):
    return reduce(lambda a, b: a * b, nums)

if __name__== '__main__':
    command = sys.argv[1]
    nums = map(float, sys.argv[2:])
    if command == 'add':
        print(add_all(nums))
    elif command == 'multiply':
        print(multiply_all(sums))
import sys

def add_all(nums):
    return sum(nums)

def multiply_all(nums):
    return reduce(lambda a, b: a * b, nums)

if __name__== '__main__':
    command = sys.argv[1]
    nums = map(float, sys.argv[2:])
    if command == 'add':
        print(add_all(nums))
    elif command == 'multiply':
        print(multiply_all(nums))
Fix bug in 'multiply' support
Fix bug in 'multiply' support
Python
bsd-3-clause
tanecious/calc
import sys

def add_all(nums):
    return sum(nums)

def multiply_all(nums):
    return reduce(lambda a, b: a * b, nums)

if __name__== '__main__':
    command = sys.argv[1]
    nums = map(float, sys.argv[2:])
    if command == 'add':
        print(add_all(nums))
    elif command == 'multiply':
        print(multiply_all(nums))
Fix bug in 'multiply' support
import sys

def add_all(nums):
    return sum(nums)

def multiply_all(nums):
    return reduce(lambda a, b: a * b, nums)

if __name__== '__main__':
    command = sys.argv[1]
    nums = map(float, sys.argv[2:])
    if command == 'add':
        print(add_all(nums))
    elif command == 'multiply':
        print(multiply_all(sums))
c15c4a663c257cad6763cf92c50b7ad706017c74
evesrp/views/__init__.py
evesrp/views/__init__.py
from collections import OrderedDict
from urllib.parse import urlparse
import re

from flask import render_template, redirect, url_for, request, abort, jsonify,\
    flash, Markup, session
from flask.views import View
from flask.ext.login import login_user, login_required, logout_user, \
    current_user
from flask.ext.wtf import Form
from flask.ext.principal import identity_changed, AnonymousIdentity
from sqlalchemy.orm.exc import NoResultFound
from wtforms.fields import StringField, PasswordField, SelectField, \
    SubmitField, TextAreaField, HiddenField
from wtforms.fields.html5 import URLField, DecimalField
from wtforms.widgets import HiddenInput
from wtforms.validators import InputRequired, ValidationError, AnyOf, URL

from .. import app, auth_methods, db, requests_session, killmail_sources
from ..auth import SubmitRequestsPermission, ReviewRequestsPermission, \
    PayoutRequestsPermission, admin_permission
from ..auth.models import User, Group, Division, Pilot
from ..models import Request, Modifier, Action


@app.route('/')
@login_required
def index():
    return render_template('base.html')
from flask import render_template
from flask.ext.login import login_required

from .. import app


@app.route('/')
@login_required
def index():
    return render_template('base.html')
Remove extraneous imports in the base view package
Remove extraneous imports in the base view package
Python
bsd-2-clause
eskwire/evesrp,eskwire/evesrp,paxswill/evesrp,eskwire/evesrp,paxswill/evesrp,paxswill/evesrp,eskwire/evesrp
from flask import render_template
from flask.ext.login import login_required

from .. import app


@app.route('/')
@login_required
def index():
    return render_template('base.html')
Remove extraneous imports in the base view package
from collections import OrderedDict
from urllib.parse import urlparse
import re

from flask import render_template, redirect, url_for, request, abort, jsonify,\
    flash, Markup, session
from flask.views import View
from flask.ext.login import login_user, login_required, logout_user, \
    current_user
from flask.ext.wtf import Form
from flask.ext.principal import identity_changed, AnonymousIdentity
from sqlalchemy.orm.exc import NoResultFound
from wtforms.fields import StringField, PasswordField, SelectField, \
    SubmitField, TextAreaField, HiddenField
from wtforms.fields.html5 import URLField, DecimalField
from wtforms.widgets import HiddenInput
from wtforms.validators import InputRequired, ValidationError, AnyOf, URL

from .. import app, auth_methods, db, requests_session, killmail_sources
from ..auth import SubmitRequestsPermission, ReviewRequestsPermission, \
    PayoutRequestsPermission, admin_permission
from ..auth.models import User, Group, Division, Pilot
from ..models import Request, Modifier, Action


@app.route('/')
@login_required
def index():
    return render_template('base.html')
07d2742da2b75d1c23451b76447acf5ec03f41b0
osf/management/commands/update_preprint_share_dates.py
osf/management/commands/update_preprint_share_dates.py
from __future__ import unicode_literals

import logging

from django.core.management.base import BaseCommand

from scripts import utils as script_utils
from osf.models import PreprintService
from website.preprints.tasks import on_preprint_updated

logger = logging.getLogger(__name__)


def update_share_preprint_modified_dates(dry_run=False):
    dates_updated = 0
    for preprint in PreprintService.objects.filter():
        if preprint.node.date_modified > preprint.date_modified:
            if not dry_run:
                on_preprint_updated(preprint._id)
            dates_updated += 1
    return dates_updated


class Command(BaseCommand):
    """
    Send more accurate preprint modified dates to Share
    (max of node.date_modified and preprint.date_modified)
    """

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--dry',
            action='store_true',
            dest='dry_run',
            help='Say how many preprint updates would be sent to share',
        )

    def handle(self, *args, **options):
        dry_run = options.get('dry_run', False)
        if not dry_run:
            script_utils.add_file_logger(logger, __file__)
            dates_updated = update_share_preprint_modified_dates()
            logger.info('Sent %d new preprint modified dates to Share' % dates_updated)
        else:
            dates_updated = update_share_preprint_modified_dates(dry_run=True)
            logger.info('Would have sent %d new preprint modified dates to Share' % dates_updated)
Add management command to update preprint share dates
Add management command to update preprint share dates
Python
apache-2.0
HalcyonChimera/osf.io,laurenrevere/osf.io,crcresearch/osf.io,icereval/osf.io,crcresearch/osf.io,Johnetordoff/osf.io,sloria/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,sloria/osf.io,baylee-d/osf.io,TomBaxter/osf.io,chrisseto/osf.io,binoculars/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,cslzchen/osf.io,mattclark/osf.io,laurenrevere/osf.io,leb2dg/osf.io,erinspace/osf.io,leb2dg/osf.io,laurenrevere/osf.io,erinspace/osf.io,felliott/osf.io,cslzchen/osf.io,crcresearch/osf.io,sloria/osf.io,aaxelb/osf.io,adlius/osf.io,brianjgeiger/osf.io,chrisseto/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,felliott/osf.io,TomBaxter/osf.io,adlius/osf.io,icereval/osf.io,icereval/osf.io,caseyrollins/osf.io,chennan47/osf.io,erinspace/osf.io,pattisdr/osf.io,pattisdr/osf.io,chrisseto/osf.io,cslzchen/osf.io,baylee-d/osf.io,mfraezz/osf.io,cslzchen/osf.io,baylee-d/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,adlius/osf.io,chrisseto/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,felliott/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,pattisdr/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,saradbowman/osf.io,saradbowman/osf.io,chennan47/osf.io,caseyrollins/osf.io,felliott/osf.io,mfraezz/osf.io,binoculars/osf.io,mattclark/osf.io,mattclark/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,chennan47/osf.io
from __future__ import unicode_literals

import logging

from django.core.management.base import BaseCommand

from scripts import utils as script_utils
from osf.models import PreprintService
from website.preprints.tasks import on_preprint_updated

logger = logging.getLogger(__name__)


def update_share_preprint_modified_dates(dry_run=False):
    dates_updated = 0
    for preprint in PreprintService.objects.filter():
        if preprint.node.date_modified > preprint.date_modified:
            if not dry_run:
                on_preprint_updated(preprint._id)
            dates_updated += 1
    return dates_updated


class Command(BaseCommand):
    """
    Send more accurate preprint modified dates to Share
    (max of node.date_modified and preprint.date_modified)
    """

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--dry',
            action='store_true',
            dest='dry_run',
            help='Say how many preprint updates would be sent to share',
        )

    def handle(self, *args, **options):
        dry_run = options.get('dry_run', False)
        if not dry_run:
            script_utils.add_file_logger(logger, __file__)
            dates_updated = update_share_preprint_modified_dates()
            logger.info('Sent %d new preprint modified dates to Share' % dates_updated)
        else:
            dates_updated = update_share_preprint_modified_dates(dry_run=True)
            logger.info('Would have sent %d new preprint modified dates to Share' % dates_updated)
Add management command to update preprint share dates
de0bbf978695d206189ee4effb124234968525cb
django_afip/views.py
django_afip/views.py
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from django.views.generic import View

from .pdf import generate_receipt_pdf


class ReceiptHTMLView(View):
    def get(self, request, pk):
        return HttpResponse(
            generate_receipt_pdf(pk, request, True),
        )


class ReceiptPDFView(View):
    def get(self, request, pk):
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename=' + \
            _('receipt %s.pdf' % pk)

        generate_receipt_pdf(pk, response)
        return response
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from django.views.generic import View

from .pdf import generate_receipt_pdf


class ReceiptHTMLView(View):
    """Renders a receipt as HTML."""

    def get(self, request, pk):
        return HttpResponse(
            generate_receipt_pdf(pk, request, True),
        )


class ReceiptPDFView(View):
    """Renders a receipt as a PDF, prompting to download it."""

    def get(self, request, pk):
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename=' + \
            _('receipt %s.pdf' % pk)

        generate_receipt_pdf(pk, response)
        return response


class ReceiptPDFDisplayView(View):
    """
    Renders a receipt as a PDF.

    Browsers should render the file, rather than prompt to download it.
    """

    def get(self, request, pk):
        response = HttpResponse(content_type='application/pdf')

        generate_receipt_pdf(pk, response)
        return response
Add a view to display PDF receipts
Add a view to display PDF receipts

Fixes #23
Closes !7
Closes !8
Python
isc
hobarrera/django-afip,hobarrera/django-afip
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from django.views.generic import View

from .pdf import generate_receipt_pdf


class ReceiptHTMLView(View):
    """Renders a receipt as HTML."""

    def get(self, request, pk):
        return HttpResponse(
            generate_receipt_pdf(pk, request, True),
        )


class ReceiptPDFView(View):
    """Renders a receipt as a PDF, prompting to download it."""

    def get(self, request, pk):
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename=' + \
            _('receipt %s.pdf' % pk)

        generate_receipt_pdf(pk, response)
        return response


class ReceiptPDFDisplayView(View):
    """
    Renders a receipt as a PDF.

    Browsers should render the file, rather than prompt to download it.
    """

    def get(self, request, pk):
        response = HttpResponse(content_type='application/pdf')

        generate_receipt_pdf(pk, response)
        return response
Add a view to display PDF receipts

Fixes #23
Closes !7
Closes !8
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from django.views.generic import View

from .pdf import generate_receipt_pdf


class ReceiptHTMLView(View):
    def get(self, request, pk):
        return HttpResponse(
            generate_receipt_pdf(pk, request, True),
        )


class ReceiptPDFView(View):
    def get(self, request, pk):
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename=' + \
            _('receipt %s.pdf' % pk)

        generate_receipt_pdf(pk, response)
        return response
6802be4498bb1143f4ce4c024a3fd82921eeb937
setup.py
setup.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calibre_books.settings')

setup(
    name='calibre-books',
    author='Adam Bogdał',
    author_email='[email protected]',
    description="Calibre server in Django",
    license='BSD',
    version='0.0.1',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'Django>=1.6',
        'django-bootstrap3>=4.8.2',
        'django-dropbox==0.0.2',
        'dj_database_url>=0.2.2',
        'python-memcached==1.53',
        'PIL==1.1.7',
        'gunicorn==19.1.0',
        'psycopg2==2.5.3',
    ],
    entry_points={
        'console_scripts': ['manage.py = calibre_books:manage']},
)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calibre_books.settings')

setup(
    name='calibre-books',
    author='Adam Bogdał',
    author_email='[email protected]',
    description="Calibre server in Django",
    license='BSD',
    version='0.0.1',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'Django>=1.6',
        'django-bootstrap3>=4.8.2',
        'django-dropbox==0.0.2',
        'dj_database_url>=0.2.2',
        'python-memcached==1.53',
        'Pillow==2.5.2',
        'gunicorn==19.1.0',
        'psycopg2==2.5.3',
    ],
    entry_points={
        'console_scripts': ['manage.py = calibre_books:manage']},
)
Use pillow instead of ordinary pil
Use pillow instead of ordinary pil
Python
bsd-2-clause
bogdal/calibre-books,bogdal/calibre-books
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calibre_books.settings')

setup(
    name='calibre-books',
    author='Adam Bogdał',
    author_email='[email protected]',
    description="Calibre server in Django",
    license='BSD',
    version='0.0.1',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'Django>=1.6',
        'django-bootstrap3>=4.8.2',
        'django-dropbox==0.0.2',
        'dj_database_url>=0.2.2',
        'python-memcached==1.53',
        'Pillow==2.5.2',
        'gunicorn==19.1.0',
        'psycopg2==2.5.3',
    ],
    entry_points={
        'console_scripts': ['manage.py = calibre_books:manage']},
)
Use pillow instead of ordinary pil
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calibre_books.settings')

setup(
    name='calibre-books',
    author='Adam Bogdał',
    author_email='[email protected]',
    description="Calibre server in Django",
    license='BSD',
    version='0.0.1',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'Django>=1.6',
        'django-bootstrap3>=4.8.2',
        'django-dropbox==0.0.2',
        'dj_database_url>=0.2.2',
        'python-memcached==1.53',
        'PIL==1.1.7',
        'gunicorn==19.1.0',
        'psycopg2==2.5.3',
    ],
    entry_points={
        'console_scripts': ['manage.py = calibre_books:manage']},
)
fd32bdaa00c61d11edcf0ca60e4058e6d0b6b2d0
backend/pycon/settings/prod.py
backend/pycon/settings/prod.py
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration

from .base import *  # noqa
from .base import env

SECRET_KEY = env("SECRET_KEY")

# CELERY_BROKER_URL = env("CELERY_BROKER_URL")

USE_SCHEDULER = False

# if FRONTEND_URL == "http://testfrontend.it/":
#     raise ImproperlyConfigured("Please configure FRONTEND_URL for production")

SENTRY_DSN = env("SENTRY_DSN", default="")

if SENTRY_DSN:
    sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()])

SLACK_INCOMING_WEBHOOK_URL = env("SLACK_INCOMING_WEBHOOK_URL")

DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_STORAGE_BUCKET_NAME = env("AWS_MEDIA_BUCKET", None)
AWS_S3_REGION_NAME = env("AWS_REGION_NAME", None)
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", None)
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", None)
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration

from .base import *  # noqa
from .base import env

SECRET_KEY = env("SECRET_KEY")

# CELERY_BROKER_URL = env("CELERY_BROKER_URL")

USE_SCHEDULER = False

# if FRONTEND_URL == "http://testfrontend.it/":
#     raise ImproperlyConfigured("Please configure FRONTEND_URL for production")

SENTRY_DSN = env("SENTRY_DSN", default="")

if SENTRY_DSN:
    sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()])

SLACK_INCOMING_WEBHOOK_URL = env("SLACK_INCOMING_WEBHOOK_URL")

DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_STORAGE_BUCKET_NAME = env("AWS_MEDIA_BUCKET", None)
AWS_S3_REGION_NAME = env("AWS_REGION_NAME", "eu-central-1")
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", None)
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", None)
Add better default for s3 region
Add better default for s3 region
Python
mit
patrick91/pycon,patrick91/pycon
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration

from .base import *  # noqa
from .base import env

SECRET_KEY = env("SECRET_KEY")

# CELERY_BROKER_URL = env("CELERY_BROKER_URL")

USE_SCHEDULER = False

# if FRONTEND_URL == "http://testfrontend.it/":
#     raise ImproperlyConfigured("Please configure FRONTEND_URL for production")

SENTRY_DSN = env("SENTRY_DSN", default="")

if SENTRY_DSN:
    sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()])

SLACK_INCOMING_WEBHOOK_URL = env("SLACK_INCOMING_WEBHOOK_URL")

DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_STORAGE_BUCKET_NAME = env("AWS_MEDIA_BUCKET", None)
AWS_S3_REGION_NAME = env("AWS_REGION_NAME", "eu-central-1")
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", None)
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", None)
Add better default for s3 region
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration

from .base import *  # noqa
from .base import env

SECRET_KEY = env("SECRET_KEY")

# CELERY_BROKER_URL = env("CELERY_BROKER_URL")

USE_SCHEDULER = False

# if FRONTEND_URL == "http://testfrontend.it/":
#     raise ImproperlyConfigured("Please configure FRONTEND_URL for production")

SENTRY_DSN = env("SENTRY_DSN", default="")

if SENTRY_DSN:
    sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()])

SLACK_INCOMING_WEBHOOK_URL = env("SLACK_INCOMING_WEBHOOK_URL")

DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_STORAGE_BUCKET_NAME = env("AWS_MEDIA_BUCKET", None)
AWS_S3_REGION_NAME = env("AWS_REGION_NAME", None)
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", None)
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", None)
e78910c8b9ecf48f96a693dae3c15afa32a12da1
casexml/apps/phone/views.py
casexml/apps/phone/views.py
from django_digest.decorators import *
from casexml.apps.phone import xml
from casexml.apps.case.models import CommCareCase
from casexml.apps.phone.restore import generate_restore_response
from casexml.apps.phone.models import User
from casexml.apps.case import const


@httpdigest
def restore(request):
    user = User.from_django_user(request.user)
    restore_id = request.GET.get('since')
    return generate_restore_response(user, restore_id)


def xml_for_case(request, case_id, version="1.0"):
    """
    Test view to get the xml for a particular case
    """
    from django.http import HttpResponse
    case = CommCareCase.get(case_id)
    return HttpResponse(xml.get_case_xml(case, [const.CASE_ACTION_CREATE, const.CASE_ACTION_UPDATE], version), mimetype="text/xml")
from django.http import HttpResponse
from django_digest.decorators import *
from casexml.apps.phone import xml
from casexml.apps.case.models import CommCareCase
from casexml.apps.phone.restore import generate_restore_response
from casexml.apps.phone.models import User
from casexml.apps.case import const


@httpdigest
def restore(request):
    user = User.from_django_user(request.user)
    restore_id = request.GET.get('since')
    return generate_restore_response(user, restore_id)


def xml_for_case(request, case_id, version="1.0"):
    """
    Test view to get the xml for a particular case
    """
    case = CommCareCase.get(case_id)
    return HttpResponse(xml.get_case_xml(case, [const.CASE_ACTION_CREATE, const.CASE_ACTION_UPDATE], version), mimetype="text/xml")
Revert "moving httpresponse to view"
Revert "moving httpresponse to view" This reverts commit a6f501bb9de6382e35372996851916adac067fa0.
Python
bsd-3-clause
SEL-Columbia/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq
from django.http import HttpResponse
from django_digest.decorators import *
from casexml.apps.phone import xml
from casexml.apps.case.models import CommCareCase
from casexml.apps.phone.restore import generate_restore_response
from casexml.apps.phone.models import User
from casexml.apps.case import const


@httpdigest
def restore(request):
    user = User.from_django_user(request.user)
    restore_id = request.GET.get('since')
    return generate_restore_response(user, restore_id)


def xml_for_case(request, case_id, version="1.0"):
    """
    Test view to get the xml for a particular case
    """
    case = CommCareCase.get(case_id)
    return HttpResponse(xml.get_case_xml(case, [const.CASE_ACTION_CREATE, const.CASE_ACTION_UPDATE], version), mimetype="text/xml")
Revert "moving httpresponse to view" This reverts commit a6f501bb9de6382e35372996851916adac067fa0. from django_digest.decorators import * from casexml.apps.phone import xml from casexml.apps.case.models import CommCareCase from casexml.apps.phone.restore import generate_restore_response from casexml.apps.phone.models import User from casexml.apps.case import const @httpdigest def restore(request): user = User.from_django_user(request.user) restore_id = request.GET.get('since') return generate_restore_response(user, restore_id) def xml_for_case(request, case_id, version="1.0"): """ Test view to get the xml for a particular case """ from django.http import HttpResponse case = CommCareCase.get(case_id) return HttpResponse(xml.get_case_xml(case, [const.CASE_ACTION_CREATE, const.CASE_ACTION_UPDATE], version), mimetype="text/xml")
eb7ff9cec9360af0b5c18915164a54d4755e657b
mistraldashboard/dashboards/mistral/executions/tables.py
mistraldashboard/dashboards/mistral/executions/tables.py
# -*- coding: utf-8 -*-
#
# Copyright 2014 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from django.utils.translation import ugettext_lazy as _

from horizon import tables


class ExecutionsTable(tables.DataTable):
    id = tables.Column("id",
                       verbose_name=_("ID"),
                       link=("horizon:mistral:executions:tasks"))
    wb_name = tables.Column("workbook_name", verbose_name=_("Workbook"))
    state = tables.Column("state", verbose_name=_("State"))

    class Meta:
        name = "executions"
        verbose_name = _("Executions")


class TaskTable(tables.DataTable):
    id = tables.Column("id", verbose_name=_("ID"))
    name = tables.Column("name", verbose_name=_("Name"))
    action = tables.Column("action", verbose_name=_("Action"))
    state = tables.Column("state", verbose_name=_("State"))

    class Meta:
        name = "tasks"
        verbose_name = _("Tasks")
# -*- coding: utf-8 -*-
#
# Copyright 2014 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from django.utils.translation import ugettext_lazy as _

from horizon import tables


class ExecutionsTable(tables.DataTable):
    id = tables.Column("id",
                       verbose_name=_("ID"),
                       link=("horizon:mistral:executions:tasks"))
    wb_name = tables.Column("workbook_name", verbose_name=_("Workbook"))
    state = tables.Column("state", verbose_name=_("State"))

    class Meta:
        name = "executions"
        verbose_name = _("Executions")


class TaskTable(tables.DataTable):
    id = tables.Column("id", verbose_name=_("ID"))
    name = tables.Column("name", verbose_name=_("Name"))
    parameters = tables.Column("parameters", verbose_name=_("Parameters"))
    output = tables.Column("output", verbose_name=_("Output"))
    state = tables.Column("state", verbose_name=_("State"))

    class Meta:
        name = "tasks"
        verbose_name = _("Tasks")
Add Task's output and parameters columns
Add Task's output and parameters columns Change-Id: I98f57a6a0178bb7258d82f3a165127f060f42f7b Implements: blueprint mistral-ui
Python
apache-2.0
openstack/mistral-dashboard,openstack/mistral-dashboard,openstack/mistral-dashboard
# -*- coding: utf-8 -*- # # Copyright 2014 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.utils.translation import ugettext_lazy as _ from horizon import tables class ExecutionsTable(tables.DataTable): id = tables.Column("id", verbose_name=_("ID"), link=("horizon:mistral:executions:tasks")) wb_name = tables.Column("workbook_name", verbose_name=_("Workbook")) state = tables.Column("state", verbose_name=_("State")) class Meta: name = "executions" verbose_name = _("Executions") class TaskTable(tables.DataTable): id = tables.Column("id", verbose_name=_("ID")) name = tables.Column("name", verbose_name=_("Name")) parameters = tables.Column("parameters", verbose_name=_("Parameters")) output = tables.Column("output", verbose_name=_("Output")) state = tables.Column("state", verbose_name=_("State")) class Meta: name = "tasks" verbose_name = _("Tasks")
Add Task's output and parameters columns Change-Id: I98f57a6a0178bb7258d82f3a165127f060f42f7b Implements: blueprint mistral-ui # -*- coding: utf-8 -*- # # Copyright 2014 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.utils.translation import ugettext_lazy as _ from horizon import tables class ExecutionsTable(tables.DataTable): id = tables.Column("id", verbose_name=_("ID"), link=("horizon:mistral:executions:tasks")) wb_name = tables.Column("workbook_name", verbose_name=_("Workbook")) state = tables.Column("state", verbose_name=_("State")) class Meta: name = "executions" verbose_name = _("Executions") class TaskTable(tables.DataTable): id = tables.Column("id", verbose_name=_("ID")) name = tables.Column("name", verbose_name=_("Name")) action = tables.Column("action", verbose_name=_("Action")) state = tables.Column("state", verbose_name=_("State")) class Meta: name = "tasks" verbose_name = _("Tasks")
ee362795318507b757795e0be4c45d68c17cd28f
roll.py
roll.py
#!/usr/bin/env python """roll simulates rolling polyhedral dice.""" # roll.py # Michael McMahon from random import randrange # Die roll function # This function rolls polyhedral dice. Example: To roll a d8, use roll(8). def roll(diefaces): """Simulate rolling polyhedral dice""" return randrange(1, int(diefaces + 1))
#!/usr/bin/env python """roll simulates rolling polyhedral dice.""" # roll.py # roll v1.0 # Michael McMahon from random import randrange # Die roll function # This function rolls polyhedral dice. Example: To roll a d8, use roll(8). def roll(diefaces): """Simulate rolling polyhedral dice""" assert isinstance(diefaces, int) and diefaces >= 1 return randrange(1, int(diefaces + 1))
Add assert to prevent invalid input
Add assert to prevent invalid input
Python
agpl-3.0
TechnologyClassroom/dice-mechanic-sim,TechnologyClassroom/dice-mechanic-sim
#!/usr/bin/env python """roll simulates rolling polyhedral dice.""" # roll.py # roll v1.0 # Michael McMahon from random import randrange # Die roll function # This function rolls polyhedral dice. Example: To roll a d8, use roll(8). def roll(diefaces): """Simulate rolling polyhedral dice""" assert isinstance(diefaces, int) and diefaces >= 1 return randrange(1, int(diefaces + 1))
Add assert to prevent invalid input #!/usr/bin/env python """roll simulates rolling polyhedral dice.""" # roll.py # Michael McMahon from random import randrange # Die roll function # This function rolls polyhedral dice. Example: To roll a d8, use roll(8). def roll(diefaces): """Simulate rolling polyhedral dice""" return randrange(1, int(diefaces + 1))
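The assert added in this record rejects zero-, negative-, or non-integer face counts before randrange runs. A minimal sketch of the guarded behaviour (the roll definition below just mirrors the record so the snippet is self-contained):

from random import randrange

def roll(diefaces):  # mirrors the record's guarded version
    assert isinstance(diefaces, int) and diefaces >= 1
    return randrange(1, diefaces + 1)

try:
    roll(0)  # fails the diefaces >= 1 guard
except AssertionError:
    print('rejected: a die needs at least one face')

print(1 <= roll(20) <= 20)  # True; note that asserts are stripped under python -O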
67f3694254e08331152cd410dec128c11e965222
daisyproducer/settings.py
daisyproducer/settings.py
from settings_common import * PACKAGE_VERSION = "0.5" DEBUG = TEMPLATE_DEBUG = True DAISY_PIPELINE_PATH = os.path.join(PROJECT_DIR, '..', '..', 'tmp', 'pipeline') EXTERNAL_PATH = os.path.join(PROJECT_DIR, '..', '..', 'tmp') SERVE_STATIC_FILES = True # the following is an idea from https://code.djangoproject.com/wiki/SplitSettings # We have both local settings and common settings. They are used as follows: # - common settings are shared data between normal settings and unit test settings # - local settings are used on productive servers to keep the local # settings such as db passwords, etc out of version control try: from settings_local import * except ImportError: pass
from settings_common import * PACKAGE_VERSION = "0.5" DEBUG = TEMPLATE_DEBUG = True DAISY_PIPELINE_PATH = os.path.join(PROJECT_DIR, '..', '..', '..', 'tmp', 'pipeline') EXTERNAL_PATH = os.path.join(PROJECT_DIR, '..', '..', '..', 'tmp') SERVE_STATIC_FILES = True # the following is an idea from https://code.djangoproject.com/wiki/SplitSettings # We have both local settings and common settings. They are used as follows: # - common settings are shared data between normal settings and unit test settings # - local settings are used on productive servers to keep the local # settings such as db passwords, etc out of version control try: from settings_local import * except ImportError: pass
Fix the path to external tools
Fix the path to external tools
Python
agpl-3.0
sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer
from settings_common import * PACKAGE_VERSION = "0.5" DEBUG = TEMPLATE_DEBUG = True DAISY_PIPELINE_PATH = os.path.join(PROJECT_DIR, '..', '..', '..', 'tmp', 'pipeline') EXTERNAL_PATH = os.path.join(PROJECT_DIR, '..', '..', '..', 'tmp') SERVE_STATIC_FILES = True # the following is an idea from https://code.djangoproject.com/wiki/SplitSettings # We have both local settings and common settings. They are used as follows: # - common settings are shared data between normal settings and unit test settings # - local settings are used on productive servers to keep the local # settings such as db passwords, etc out of version control try: from settings_local import * except ImportError: pass
Fix the path to external tools from settings_common import * PACKAGE_VERSION = "0.5" DEBUG = TEMPLATE_DEBUG = True DAISY_PIPELINE_PATH = os.path.join(PROJECT_DIR, '..', '..', 'tmp', 'pipeline') EXTERNAL_PATH = os.path.join(PROJECT_DIR, '..', '..', 'tmp') SERVE_STATIC_FILES = True # the following is an idea from https://code.djangoproject.com/wiki/SplitSettings # We have both local settings and common settings. They are used as follows: # - common settings are shared data between normal settings and unit test settings # - local settings are used on productive servers to keep the local # settings such as db passwords, etc out of version control try: from settings_local import * except ImportError: pass
e7e37e9b1fd56d18711299065d6f421c1cb28bac
moksha/tests/test_feed.py
moksha/tests/test_feed.py
from tw.api import Widget from moksha.feed import Feed class TestFeed(object): def test_feed_subclassing(self): class MyFeed(Feed): url = 'http://lewk.org/rss' feed = MyFeed() assert feed.url == 'http://lewk.org/rss' assert feed.num_entries() > 0 for entry in feed.iterentries(): pass for entry in feed.entries(): pass def test_widget_children(self): class MyWidget(Widget): myfeedurl = 'http://lewk.org/rss' children = [Feed('myfeed', url=myfeedurl)] template = "mako:${c.myfeed()}" widget = MyWidget() assert widget.c.myfeed def test_feed_generator(self): feed = Feed(url='http://lewk.org/rss') iter = feed.iterentries() data = iter.next() assert iter.next()
Add some Feed test cases
Add some Feed test cases
Python
apache-2.0
pombredanne/moksha,lmacken/moksha,pombredanne/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha,pombredanne/moksha,pombredanne/moksha,ralphbean/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha
from tw.api import Widget from moksha.feed import Feed class TestFeed(object): def test_feed_subclassing(self): class MyFeed(Feed): url = 'http://lewk.org/rss' feed = MyFeed() assert feed.url == 'http://lewk.org/rss' assert feed.num_entries() > 0 for entry in feed.iterentries(): pass for entry in feed.entries(): pass def test_widget_children(self): class MyWidget(Widget): myfeedurl = 'http://lewk.org/rss' children = [Feed('myfeed', url=myfeedurl)] template = "mako:${c.myfeed()}" widget = MyWidget() assert widget.c.myfeed def test_feed_generator(self): feed = Feed(url='http://lewk.org/rss') iter = feed.iterentries() data = iter.next() assert iter.next()
Add some Feed test cases
254403f507ea8ae075a791f24a031eaa79fc2447
tools/dev/wc-format.py
tools/dev/wc-format.py
#!/usr/bin/env python import os import sqlite3 import sys # helper def usage(): sys.stderr.write("USAGE: %s [PATH]\n" + \ "\n" + \ "Prints to stdout the format of the working copy at PATH.\n") # parse argv wc = (sys.argv[1:] + ['.'])[0] # main() entries = os.path.join(wc, '.svn', 'entries') wc_db = os.path.join(wc, '.svn', 'wc.db') if os.path.exists(entries): formatno = int(open(entries).readline()) elif os.path.exists(wc_db): formatno = sqlite3.connect(wc_db).execute('pragma user_version;').fetchone()[0] else: usage() sys.exit(1) # 1.0.x -> 1.3.x: format 4 # 1.4.x: format 8 # 1.5.x: format 9 # 1.6.x: format 10 # 1.7.x: format XXX print("%s: %d" % (wc, formatno))
Add a helper script, ported to Python.
Add a helper script, ported to Python. * tools/dev/wc-format.py: New. Prints the working copy format of a given directory.
Python
apache-2.0
jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion
#!/usr/bin/env python import os import sqlite3 import sys # helper def usage(): sys.stderr.write("USAGE: %s [PATH]\n" + \ "\n" + \ "Prints to stdout the format of the working copy at PATH.\n") # parse argv wc = (sys.argv[1:] + ['.'])[0] # main() entries = os.path.join(wc, '.svn', 'entries') wc_db = os.path.join(wc, '.svn', 'wc.db') if os.path.exists(entries): formatno = int(open(entries).readline()) elif os.path.exists(wc_db): formatno = sqlite3.connect(wc_db).execute('pragma user_version;').fetchone()[0] else: usage() sys.exit(1) # 1.0.x -> 1.3.x: format 4 # 1.4.x: format 8 # 1.5.x: format 9 # 1.6.x: format 10 # 1.7.x: format XXX print("%s: %d" % (wc, formatno))
Add a helper script, ported to Python. * tools/dev/wc-format.py: New. Prints the working copy format of a given directory.
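One quirk worth flagging in this record: usage() concatenates a format string with + but never interpolates it, so a literal %s would reach stderr. A hedged sketch of the presumable intent, plus the SQLite query the 1.7 branch of the script relies on (assumptions: same file layout as the record):

import sys
import sqlite3

def usage():
    # Interpolating sys.argv[0] is presumably what the %s was for.
    sys.stderr.write('USAGE: %s [PATH]\n'
                     '\n'
                     'Prints to stdout the format of the working copy at PATH.\n'
                     % sys.argv[0])

# For 1.7+ working copies the format number lives in SQLite metadata:
# formatno = sqlite3.connect('.svn/wc.db').execute(
#     'pragma user_version;').fetchone()[0]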
d7ed79ec53279f0fea0881703079a1c5b82bf938
_settings.py
_settings.py
# Configuration settings # ID of HPO to validate (see resources/hpo.csv) hpo_id = 'hpo_id' # location of files to validate, evaluate csv_dir = 'path/to/csv_files' # sprint number being validated against sprint_num = 0 # Submissions and logs stored here # For more examples and requirements see http://docs.sqlalchemy.org/en/latest/core/engines.html conn_str = 'mssql+pymssql://localhost/pmi_sprint_1'
# Configuration settings # ID of HPO to validate (see resources/hpo.csv) hpo_id = 'hpo_id' # location of files to validate, evaluate csv_dir = 'path/to/csv_files' # sprint number being validated against sprint_num = 0 # Submissions and logs stored here # Note: Connecting to MSSQL from *nix may require FreeTDS configuration (see https://goo.gl/qKhusY) # For more examples and requirements see http://docs.sqlalchemy.org/en/latest/core/engines.html conn_str = 'mssql+pymssql://localhost/pmi_sprint_1'
Add comment regarding freetds config
Add comment regarding freetds config
Python
mit
cumc-dbmi/pmi_sprint_reporter
# Configuration settings # ID of HPO to validate (see resources/hpo.csv) hpo_id = 'hpo_id' # location of files to validate, evaluate csv_dir = 'path/to/csv_files' # sprint number being validated against sprint_num = 0 # Submissions and logs stored here # Note: Connecting to MSSQL from *nix may require FreeTDS configuration (see https://goo.gl/qKhusY) # For more examples and requirements see http://docs.sqlalchemy.org/en/latest/core/engines.html conn_str = 'mssql+pymssql://localhost/pmi_sprint_1'
Add comment regarding freetds config # Configuration settings # ID of HPO to validate (see resources/hpo.csv) hpo_id = 'hpo_id' # location of files to validate, evaluate csv_dir = 'path/to/csv_files' # sprint number being validated against sprint_num = 0 # Submissions and logs stored here # For more examples and requirements see http://docs.sqlalchemy.org/en/latest/core/engines.html conn_str = 'mssql+pymssql://localhost/pmi_sprint_1'
6cfc94d8a03439c55808090aa5e3a4f35c288887
menpodetect/tests/opencv_test.py
menpodetect/tests/opencv_test.py
from menpodetect.opencv import (load_opencv_frontal_face_detector, load_opencv_eye_detector) import menpo.io as mio takeo = mio.import_builtin_asset.takeo_ppm() def test_frontal_face_detector(): takeo_copy = takeo.copy() opencv_detector = load_opencv_frontal_face_detector() pcs = opencv_detector(takeo_copy) assert len(pcs) == 1 assert takeo_copy.n_channels == 3 assert takeo_copy.landmarks['opencv_0'][None].n_points == 4 def test_frontal_face_detector_min_neighbors(): takeo_copy = takeo.copy() opencv_detector = load_opencv_frontal_face_detector() pcs = opencv_detector(takeo_copy, min_neighbours=100) assert len(pcs) == 0 assert takeo_copy.n_channels == 3 def test_eye_detector(): takeo_copy = takeo.copy() opencv_detector = load_opencv_eye_detector() pcs = opencv_detector(takeo_copy, min_size=(5, 5)) assert len(pcs) == 1 assert takeo_copy.n_channels == 3 assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
from numpy.testing import assert_allclose from menpodetect.opencv import (load_opencv_frontal_face_detector, load_opencv_eye_detector) import menpo.io as mio takeo = mio.import_builtin_asset.takeo_ppm() def test_frontal_face_detector(): takeo_copy = takeo.copy() opencv_detector = load_opencv_frontal_face_detector() pcs = opencv_detector(takeo_copy) assert len(pcs) == 1 assert takeo_copy.n_channels == 3 assert takeo_copy.landmarks['opencv_0'][None].n_points == 4 def test_frontal_face_detector_min_neighbors(): takeo_copy = takeo.copy() opencv_detector = load_opencv_frontal_face_detector() pcs = opencv_detector(takeo_copy, min_neighbours=100) assert len(pcs) == 0 assert takeo_copy.n_channels == 3 def test_eye_detector(): takeo_copy = takeo.copy() opencv_detector = load_opencv_eye_detector() pcs = opencv_detector(takeo_copy, min_size=(5, 5)) assert_allclose(len(pcs), 1) assert takeo_copy.n_channels == 3 assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
Use assert_allclose so we can see the appveyor failure
Use assert_allclose so we can see the appveyor failure
Python
bsd-3-clause
yuxiang-zhou/menpodetect,jabooth/menpodetect,yuxiang-zhou/menpodetect,jabooth/menpodetect
from numpy.testing import assert_allclose from menpodetect.opencv import (load_opencv_frontal_face_detector, load_opencv_eye_detector) import menpo.io as mio takeo = mio.import_builtin_asset.takeo_ppm() def test_frontal_face_detector(): takeo_copy = takeo.copy() opencv_detector = load_opencv_frontal_face_detector() pcs = opencv_detector(takeo_copy) assert len(pcs) == 1 assert takeo_copy.n_channels == 3 assert takeo_copy.landmarks['opencv_0'][None].n_points == 4 def test_frontal_face_detector_min_neighbors(): takeo_copy = takeo.copy() opencv_detector = load_opencv_frontal_face_detector() pcs = opencv_detector(takeo_copy, min_neighbours=100) assert len(pcs) == 0 assert takeo_copy.n_channels == 3 def test_eye_detector(): takeo_copy = takeo.copy() opencv_detector = load_opencv_eye_detector() pcs = opencv_detector(takeo_copy, min_size=(5, 5)) assert_allclose(len(pcs), 1) assert takeo_copy.n_channels == 3 assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
Use assert_allclose so we can see the appveyor failure from menpodetect.opencv import (load_opencv_frontal_face_detector, load_opencv_eye_detector) import menpo.io as mio takeo = mio.import_builtin_asset.takeo_ppm() def test_frontal_face_detector(): takeo_copy = takeo.copy() opencv_detector = load_opencv_frontal_face_detector() pcs = opencv_detector(takeo_copy) assert len(pcs) == 1 assert takeo_copy.n_channels == 3 assert takeo_copy.landmarks['opencv_0'][None].n_points == 4 def test_frontal_face_detector_min_neighbors(): takeo_copy = takeo.copy() opencv_detector = load_opencv_frontal_face_detector() pcs = opencv_detector(takeo_copy, min_neighbours=100) assert len(pcs) == 0 assert takeo_copy.n_channels == 3 def test_eye_detector(): takeo_copy = takeo.copy() opencv_detector = load_opencv_eye_detector() pcs = opencv_detector(takeo_copy, min_size=(5, 5)) assert len(pcs) == 1 assert takeo_copy.n_channels == 3 assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
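The point of switching to assert_allclose is diagnostics: a bare assert fails on CI with no context, while NumPy's helper reports both operands in the traceback. A small illustration (exact message wording varies across NumPy versions):

from numpy.testing import assert_allclose

try:
    assert_allclose(0, 1)   # stands in for `len(pcs) == 1` failing
except AssertionError as err:
    print(err)  # shows actual vs. desired values, unlike a bare `assert 0 == 1`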
9144e6011df4aebd74db152dad2bb07a8eebf6ee
setup_egg.py
setup_egg.py
#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Wrapper to run setup.py using setuptools.""" if __name__ == '__main__': execfile('setup.py', dict(__name__='__main__', __file__='setup.py', # needed in setup.py force_setuptools=True))
#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Wrapper to run setup.py using setuptools.""" if __name__ == '__main__': exec('setup.py', dict(__name__='__main__', __file__='setup.py', # needed in setup.py force_setuptools=True))
Use `exec` instead of `execfile`.
Use `exec` instead of `execfile`.
Python
bsd-3-clause
FrancoisRheaultUS/dipy,FrancoisRheaultUS/dipy
#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Wrapper to run setup.py using setuptools.""" if __name__ == '__main__': exec('setup.py', dict(__name__='__main__', __file__='setup.py', # needed in setup.py force_setuptools=True))
Use `exec` instead of `execfile`. #!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Wrapper to run setup.py using setuptools.""" if __name__ == '__main__': execfile('setup.py', dict(__name__='__main__', __file__='setup.py', # needed in setup.py force_setuptools=True))
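Note that exec() takes source text, not a filename, so the replacement in this record would execute the literal string 'setup.py' rather than the file's contents. The conventional Python 3 equivalent of execfile reads the file first; a sketch under that assumption (not necessarily what the project later adopted):

# Python 3 replacement for execfile('setup.py', namespace):
with open('setup.py') as f:
    exec(compile(f.read(), 'setup.py', 'exec'),
         dict(__name__='__main__',
              __file__='setup.py',  # needed in setup.py
              force_setuptools=True))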
d86144aa09ea0d6a679a661b0b2f887d6a2a725d
examples/python/values.py
examples/python/values.py
#! /usr/bin/env python # # values.py # """ An example of using values via Python API """ from opencog.atomspace import AtomSpace, TruthValue from opencog.type_constructors import * a = AtomSpace() set_type_ctor_atomspace(a) a = FloatValue([1.0, 2.0, 3.0]) b = FloatValue([1.0, 2.0, 3.0]) c = FloatValue(1.0) print('{} == {}: {}'.format(a, b, a == b)) print('{} == {}: {}'.format(a, c, a == c)) featureValue = FloatValue([1.0, 2]) print('new value created: {}'.format(featureValue)) boundingBox = ConceptNode('boundingBox') featureKey = PredicateNode('features') boundingBox.set_value(featureKey, featureValue) print('set value to atom: {}'.format(boundingBox)) print('get value from atom: {}'.format(boundingBox.get_value(featureKey)))
#! /usr/bin/env python # # values.py # """ An example of using values via Python API """ from opencog.atomspace import AtomSpace, TruthValue from opencog.type_constructors import * a = AtomSpace() set_type_ctor_atomspace(a) a = FloatValue([1.0, 2.0, 3.0]) b = FloatValue([1.0, 2.0, 3.0]) c = FloatValue(1.0) print('{} == {}: {}'.format(a, b, a == b)) print('{} == {}: {}'.format(a, c, a == c)) featureValue = FloatValue([1.0, 2]) print('new value created: {}'.format(featureValue)) boundingBox = ConceptNode('boundingBox') featureKey = PredicateNode('features') boundingBox.set_value(featureKey, featureValue) print('set value to atom: {}'.format(boundingBox)) value = boundingBox.get_value(featureKey) print('get value from atom: {}'.format(value)) list = value.to_list() print('get python list from value: {}'.format(list))
Add example of Value to Python list conversion
Add example of Value to Python list conversion
Python
agpl-3.0
rTreutlein/atomspace,AmeBel/atomspace,AmeBel/atomspace,rTreutlein/atomspace,AmeBel/atomspace,rTreutlein/atomspace,rTreutlein/atomspace,AmeBel/atomspace,AmeBel/atomspace,rTreutlein/atomspace
#! /usr/bin/env python # # values.py # """ An example of using values via Python API """ from opencog.atomspace import AtomSpace, TruthValue from opencog.type_constructors import * a = AtomSpace() set_type_ctor_atomspace(a) a = FloatValue([1.0, 2.0, 3.0]) b = FloatValue([1.0, 2.0, 3.0]) c = FloatValue(1.0) print('{} == {}: {}'.format(a, b, a == b)) print('{} == {}: {}'.format(a, c, a == c)) featureValue = FloatValue([1.0, 2]) print('new value created: {}'.format(featureValue)) boundingBox = ConceptNode('boundingBox') featureKey = PredicateNode('features') boundingBox.set_value(featureKey, featureValue) print('set value to atom: {}'.format(boundingBox)) value = boundingBox.get_value(featureKey) print('get value from atom: {}'.format(value)) list = value.to_list() print('get python list from value: {}'.format(list))
Add example of Value to Python list conversion #! /usr/bin/env python # # values.py # """ An example of using values via Python API """ from opencog.atomspace import AtomSpace, TruthValue from opencog.type_constructors import * a = AtomSpace() set_type_ctor_atomspace(a) a = FloatValue([1.0, 2.0, 3.0]) b = FloatValue([1.0, 2.0, 3.0]) c = FloatValue(1.0) print('{} == {}: {}'.format(a, b, a == b)) print('{} == {}: {}'.format(a, c, a == c)) featureValue = FloatValue([1.0, 2]) print('new value created: {}'.format(featureValue)) boundingBox = ConceptNode('boundingBox') featureKey = PredicateNode('features') boundingBox.set_value(featureKey, featureValue) print('set value to atom: {}'.format(boundingBox)) print('get value from atom: {}'.format(boundingBox.get_value(featureKey)))
f76ccddca4864b2f2faf8dfadefa6ac15c930043
examples/tour_examples/driverjs_maps_tour.py
examples/tour_examples/driverjs_maps_tour.py
from seleniumbase import BaseCase class MyTestClass(BaseCase): def test_basic(self): self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z") self.wait_for_element("#searchboxinput") self.wait_for_element("#minimap") self.wait_for_element("#zoom") # Create a website tour using the DriverJS library # Same as: self.create_driverjs_tour() self.create_tour(theme="driverjs") self.add_tour_step("🗺️ Welcome to Google Maps 🗺️", "html", title="✅ SeleniumBase Tours 🌎") self.add_tour_step("You can type a location into this Search box.", "#searchboxinput") self.add_tour_step("Then click here to view it on the map.", "#searchbox-searchbutton", alignment="bottom") self.add_tour_step("Or click here to get driving directions.", "#searchbox-directions", alignment="bottom") self.add_tour_step("Use this button to get a Satellite view.", "#minimap div.widget-minimap", alignment="right") self.add_tour_step("Click here to zoom in.", "#widget-zoom-in", alignment="left") self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out", alignment="left") self.add_tour_step("Use the Menu button for more options.", ".searchbox-hamburger-container", alignment="right") self.add_tour_step("Or click here to see more Google apps.", '[title="Google apps"]', alignment="left") self.add_tour_step("Thanks for using SeleniumBase Tours", "html", title="🚃 End of Guided Tour 🚃") self.export_tour() # The default name for exports is "my_tour.js" self.play_tour(interval=0) # If interval > 0, autoplay after N seconds
Add an example tour for DriverJS
Add an example tour for DriverJS
Python
mit
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
from seleniumbase import BaseCase class MyTestClass(BaseCase): def test_basic(self): self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z") self.wait_for_element("#searchboxinput") self.wait_for_element("#minimap") self.wait_for_element("#zoom") # Create a website tour using the DriverJS library # Same as: self.create_driverjs_tour() self.create_tour(theme="driverjs") self.add_tour_step("🗺️ Welcome to Google Maps 🗺️", "html", title="✅ SeleniumBase Tours 🌎") self.add_tour_step("You can type a location into this Search box.", "#searchboxinput") self.add_tour_step("Then click here to view it on the map.", "#searchbox-searchbutton", alignment="bottom") self.add_tour_step("Or click here to get driving directions.", "#searchbox-directions", alignment="bottom") self.add_tour_step("Use this button to get a Satellite view.", "#minimap div.widget-minimap", alignment="right") self.add_tour_step("Click here to zoom in.", "#widget-zoom-in", alignment="left") self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out", alignment="left") self.add_tour_step("Use the Menu button for more options.", ".searchbox-hamburger-container", alignment="right") self.add_tour_step("Or click here to see more Google apps.", '[title="Google apps"]', alignment="left") self.add_tour_step("Thanks for using SeleniumBase Tours", "html", title="🚃 End of Guided Tour 🚃") self.export_tour() # The default name for exports is "my_tour.js" self.play_tour(interval=0) # If interval > 0, autoplay after N seconds
Add an example tour for DriverJS
ad07405ca877d65f30c9acd19abb4e782d854eaa
workshops/views.py
workshops/views.py
from django.views.generic import ListView, DetailView from config.utils import get_active_event from workshops.models import Workshop class WorkshopListView(ListView): template_name = 'workshops/list_workshops.html' model = Workshop context_object_name = 'workshops' def get_queryset(self): event = get_active_event() return (super().get_queryset() .filter(event=event) .prefetch_related('applicants__user', 'skill_level') .order_by('title')) class WorkshopDetailView(DetailView): template_name = 'workshops/view_workshop.html' model = Workshop def get_queryset(self): return super().get_queryset().prefetch_related('applicants__user', 'skill_level')
from django.views.generic import ListView, DetailView from config.utils import get_active_event from workshops.models import Workshop class WorkshopListView(ListView): template_name = 'workshops/list_workshops.html' model = Workshop context_object_name = 'workshops' def get_queryset(self): event = get_active_event() return (super().get_queryset() .filter(event=event) .prefetch_related('applicants__user', 'skill_level') .order_by('starts_at', 'title')) class WorkshopDetailView(DetailView): template_name = 'workshops/view_workshop.html' model = Workshop def get_queryset(self): return super().get_queryset().prefetch_related('applicants__user', 'skill_level')
Order workshops by start date before title
Order workshops by start date before title
Python
bsd-3-clause
WebCampZg/conference-web,WebCampZg/conference-web,WebCampZg/conference-web
from django.views.generic import ListView, DetailView from config.utils import get_active_event from workshops.models import Workshop class WorkshopListView(ListView): template_name = 'workshops/list_workshops.html' model = Workshop context_object_name = 'workshops' def get_queryset(self): event = get_active_event() return (super().get_queryset() .filter(event=event) .prefetch_related('applicants__user', 'skill_level') .order_by('starts_at', 'title')) class WorkshopDetailView(DetailView): template_name = 'workshops/view_workshop.html' model = Workshop def get_queryset(self): return super().get_queryset().prefetch_related('applicants__user', 'skill_level')
Order workshops by start date before title from django.views.generic import ListView, DetailView from config.utils import get_active_event from workshops.models import Workshop class WorkshopListView(ListView): template_name = 'workshops/list_workshops.html' model = Workshop context_object_name = 'workshops' def get_queryset(self): event = get_active_event() return (super().get_queryset() .filter(event=event) .prefetch_related('applicants__user', 'skill_level') .order_by('title')) class WorkshopDetailView(DetailView): template_name = 'workshops/view_workshop.html' model = Workshop def get_queryset(self): return super().get_queryset().prefetch_related('applicants__user', 'skill_level')
8f31a87ace324c519eac8d883cf0327d08f48df0
lib/ansiblelint/rules/VariableHasSpacesRule.py
lib/ansiblelint/rules/VariableHasSpacesRule.py
# Copyright (c) 2016, Will Thames and contributors # Copyright (c) 2018, Ansible Project from ansiblelint import AnsibleLintRule import re class VariableHasSpacesRule(AnsibleLintRule): id = '206' shortdesc = 'Variables should have spaces before and after: {{ var_name }}' description = 'Variables should have spaces before and after: ``{{ var_name }}``' severity = 'LOW' tags = ['formatting'] version_added = 'v4.0.0' variable_syntax = re.compile(r"{{.*}}") bracket_regex = re.compile(r"{{[^{' -]|[^ '}-]}}") def match(self, file, line): if not self.variable_syntax.search(line): return return self.bracket_regex.search(line)
# Copyright (c) 2016, Will Thames and contributors # Copyright (c) 2018, Ansible Project from ansiblelint import AnsibleLintRule import re class VariableHasSpacesRule(AnsibleLintRule): id = '206' shortdesc = 'Variables should have spaces before and after: {{ var_name }}' description = 'Variables should have spaces before and after: ``{{ var_name }}``' severity = 'LOW' tags = ['formatting'] version_added = 'v4.0.0' variable_syntax = re.compile(r"{{.*}}") bracket_regex = re.compile(r"{{[^{' -]|[^ '}-]}}") def match(self, file, line): if not self.variable_syntax.search(line): return line_exclude_json = re.sub(r"[^{]{'\w+': ?[^{]{.*?}}", "", line) return self.bracket_regex.search(line_exclude_json)
Fix nested JSON obj false positive
var-space-rule: Fix nested JSON obj false positive When using a compact-form nested JSON object within a Jinja2 context, as shown in the following example: set_fact: "{{ {'test': {'subtest': variable}} }}" 'variable}}' will raise a false-positive [206] error. This commit adds an intermediate step within the 206 rule (VariableHasSpacesRule.py) to exclude nested JSON objects before matching for an actual error. Fixes: #665 Signed-off-by: Simon Kheng <[email protected]>
Python
mit
willthames/ansible-lint
# Copyright (c) 2016, Will Thames and contributors # Copyright (c) 2018, Ansible Project from ansiblelint import AnsibleLintRule import re class VariableHasSpacesRule(AnsibleLintRule): id = '206' shortdesc = 'Variables should have spaces before and after: {{ var_name }}' description = 'Variables should have spaces before and after: ``{{ var_name }}``' severity = 'LOW' tags = ['formatting'] version_added = 'v4.0.0' variable_syntax = re.compile(r"{{.*}}") bracket_regex = re.compile(r"{{[^{' -]|[^ '}-]}}") def match(self, file, line): if not self.variable_syntax.search(line): return line_exclude_json = re.sub(r"[^{]{'\w+': ?[^{]{.*?}}", "", line) return self.bracket_regex.search(line_exclude_json)
var-space-rule: Fix nested JSON obj false positive When using a compact-form nested JSON object within a Jinja2 context, as shown in the following example: set_fact: "{{ {'test': {'subtest': variable}} }}" 'variable}}' will raise a false-positive [206] error. This commit adds an intermediate step within the 206 rule (VariableHasSpacesRule.py) to exclude nested JSON objects before matching for an actual error. Fixes: #665 Signed-off-by: Simon Kheng <[email protected]> # Copyright (c) 2016, Will Thames and contributors # Copyright (c) 2018, Ansible Project from ansiblelint import AnsibleLintRule import re class VariableHasSpacesRule(AnsibleLintRule): id = '206' shortdesc = 'Variables should have spaces before and after: {{ var_name }}' description = 'Variables should have spaces before and after: ``{{ var_name }}``' severity = 'LOW' tags = ['formatting'] version_added = 'v4.0.0' variable_syntax = re.compile(r"{{.*}}") bracket_regex = re.compile(r"{{[^{' -]|[^ '}-]}}") def match(self, file, line): if not self.variable_syntax.search(line): return return self.bracket_regex.search(line)
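The two stages of the fixed rule can be exercised in isolation; a minimal repro of the false positive and its exclusion, using the sample line quoted in the commit message:

import re

bracket_regex = re.compile(r"{{[^{' -]|[^ '}-]}}")
line = 'set_fact: "{{ {\'test\': {\'subtest\': variable}} }}"'

# Before the fix: `variable}}` matches the second alternative.
print(bool(bracket_regex.search(line)))       # True -> false positive

# The fix strips compact nested JSON objects first...
stripped = re.sub(r"[^{]{'\w+': ?[^{]{.*?}}", "", line)
# ...after which no bracket violation remains.
print(bool(bracket_regex.search(stripped)))   # False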
d0494a9475437e70f5f03576d9b8888aaadac458
migrations/versions/1815829d365_.py
migrations/versions/1815829d365_.py
"""empty message Revision ID: 1815829d365 Revises: 3fcddd64a72 Create Date: 2016-02-09 17:58:47.362133 """ # revision identifiers, used by Alembic. revision = '1815829d365' down_revision = '3fcddd64a72' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### # create new unique index to include geo app ref op.execute("DROP INDEX title_abr_idx") op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))") ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.execute("DROP INDEX title_abr_geo_idx") op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))") ### end Alembic commands ###
Replace title_abr_idx with new unique index that includes geometry_application_reference
Replace title_abr_idx with new unique index that includes geometry_application_reference
Python
mit
LandRegistry/system-of-record,LandRegistry/system-of-record
"""empty message Revision ID: 1815829d365 Revises: 3fcddd64a72 Create Date: 2016-02-09 17:58:47.362133 """ # revision identifiers, used by Alembic. revision = '1815829d365' down_revision = '3fcddd64a72' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### # create new unique index to include geo app ref op.execute("DROP INDEX title_abr_idx") op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))") ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.execute("DROP INDEX title_abr_geo_idx") op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))") ### end Alembic commands ###
Replace title_abr_idx with new unique index that includes geometry_application_reference
fb08c6cfe6b6295a9aca9e579a067f34ee1c69c2
test/get-gh-comment-info.py
test/get-gh-comment-info.py
import argparse parser = argparse.ArgumentParser() parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases parser.add_argument('--focus', type=str, default="") parser.add_argument('--kernel_version', type=str, default="") parser.add_argument('--k8s_version', type=str, default="") parser.add_argument('--retrieve', type=str, default="focus") args = parser.parse_args() print(args.__dict__[args.retrieve])
import argparse parser = argparse.ArgumentParser() parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases parser.add_argument('--focus', type=str, default="") parser.add_argument('--kernel_version', type=str, default="") parser.add_argument('--k8s_version', type=str, default="") parser.add_argument('--retrieve', type=str, default="focus") args = parser.parse_args() # Update kernel_version to expected format args.kernel_version = args.kernel_version.replace('.', '') if args.kernel_version == "netnext": args.kernel_version = "net-next" print(args.__dict__[args.retrieve])
Format test-only's kernel_version to avoid mistakes
test: Format test-only's kernel_version to avoid mistakes I often try to start test-only builds with e.g.: test-only --kernel_version=4.19 --focus="..." That fails because our tests expect "419". We can extend the Python script used to parse arguments to recognize that and update kernel_version to the expected format. Signed-off-by: Paul Chaignon <[email protected]>
Python
apache-2.0
cilium/cilium,tklauser/cilium,tgraf/cilium,tklauser/cilium,michi-covalent/cilium,tklauser/cilium,cilium/cilium,tgraf/cilium,cilium/cilium,michi-covalent/cilium,tgraf/cilium,tgraf/cilium,michi-covalent/cilium,michi-covalent/cilium,tgraf/cilium,cilium/cilium,tklauser/cilium,michi-covalent/cilium,tklauser/cilium,cilium/cilium,tgraf/cilium
import argparse parser = argparse.ArgumentParser() parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases parser.add_argument('--focus', type=str, default="") parser.add_argument('--kernel_version', type=str, default="") parser.add_argument('--k8s_version', type=str, default="") parser.add_argument('--retrieve', type=str, default="focus") args = parser.parse_args() # Update kernel_version to expected format args.kernel_version = args.kernel_version.replace('.', '') if args.kernel_version == "netnext": args.kernel_version = "net-next" print(args.__dict__[args.retrieve])
test: Format test-only's kernel_version to avoid mistakes I often try to start test-only builds with e.g.: test-only --kernel_version=4.19 --focus="..." That fails because our tests expect "419". We can extend the Python script used to parse arguments to recognize that and update kernel_version to the expected format. Signed-off-by: Paul Chaignon <[email protected]> import argparse parser = argparse.ArgumentParser() parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases parser.add_argument('--focus', type=str, default="") parser.add_argument('--kernel_version', type=str, default="") parser.add_argument('--k8s_version', type=str, default="") parser.add_argument('--retrieve', type=str, default="focus") args = parser.parse_args() print(args.__dict__[args.retrieve])
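The normalisation added in this record can be exercised on its own; a tiny hedged harness mirroring the record's logic (the helper name is ours, not the script's):

def normalize(kernel_version):
    # Mirrors the record: strip dots, then map the dot-free alias.
    kernel_version = kernel_version.replace('.', '')
    if kernel_version == "netnext":
        kernel_version = "net-next"
    return kernel_version

assert normalize("4.19") == "419"           # the failure mode from the message
assert normalize("netnext") == "net-next"
assert normalize("net-next") == "net-next"  # already-correct input passes through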
a53fba0f648b3472834443fa3dc31c0611bcb6a3
test/test_mcmc_serial.py
test/test_mcmc_serial.py
import time import numpy as np import yaml import quantitation # Set parameters path_cfg = 'examples/basic.yml' # Load config cfg = yaml.load(open(path_cfg, 'rb')) # Load data mapping_peptides = np.loadtxt(cfg['data']['path_mapping_peptides'], dtype=np.int) mapping_states_obs, intensities_obs = np.loadtxt(cfg['data']['path_data_state'], dtype=[('peptide', np.int), ('intensity', np.float)], unpack=True) # Run MCMC sampler time_start = time.time() draws, accept_stats = quantitation.mcmc_serial(intensities_obs, mapping_states_obs, mapping_peptides, cfg) time_done = time.time() # Print timing information print "%f seconds for %d iterations" % (time_done-time_start, cfg['settings']['n_iterations']) print "%f seconds per iteration" % ((time_done-time_start) / (0.+cfg['settings']['n_iterations'])) # Extract posterior means means = {} for k, x in draws.iteritems(): means[k] = np.mean(x, 0)
Add basic test script for mcmc_serial. Code now passes with conditions on the prior.
Add basic test script for mcmc_serial. Code now passes with conditions on the prior. Code runs with all prior inputs on ups2 and simulated data. However, variance hyperparameters exhibit issues when used with improper priors on the rate parameter. The shape and rate parameters diverge towards infinity as their ratio (the expected precision) remains fixed. All logic and mathematics have been checked extremely carefully and no errors appear to remain. I believe, at this point, that the given problem arises from posterior impropriety. This needs to be checked mathematically, likely on a simpler hierarchical model with no missing data, two layers of variance parameters, and three layers of normal means/observations.
Python
bsd-3-clause
awblocker/quantitation,awblocker/quantitation,awblocker/quantitation
import time import numpy as np import yaml import quantitation # Set parameters path_cfg = 'examples/basic.yml' # Load config cfg = yaml.load(open(path_cfg, 'rb')) # Load data mapping_peptides = np.loadtxt(cfg['data']['path_mapping_peptides'], dtype=np.int) mapping_states_obs, intensities_obs = np.loadtxt(cfg['data']['path_data_state'], dtype=[('peptide', np.int), ('intensity', np.float)], unpack=True) # Run MCMC sampler time_start = time.time() draws, accept_stats = quantitation.mcmc_serial(intensities_obs, mapping_states_obs, mapping_peptides, cfg) time_done = time.time() # Print timing information print "%f seconds for %d iterations" % (time_done-time_start, cfg['settings']['n_iterations']) print "%f seconds per iteration" % ((time_done-time_start) / (0.+cfg['settings']['n_iterations'])) # Extract posterior means means = {} for k, x in draws.iteritems(): means[k] = np.mean(x, 0)
Add basic test script for mcmc_serial. Code now passes with conditions on the prior. Code runs with all prior inputs on ups2 and simulated data. However, variance hyperparameters exhibit issues when used with improper priors on the rate parameter. The shape and rate parameters diverge towards infinity as their ratio (the expected precision) remains fixed. All logic and mathematics have been checked extremely carefully and no errors appear to remain. I believe, at this point, that the given problem arises from posterior impropriety. This needs to be checked mathematically, likely on a simpler hierarchical model with no missing data, two layers of variance parameters, and three layers of normal means/observations.
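The divergence described, shape and rate growing without bound at a fixed ratio, is the usual signature of a gamma hyperprior collapsing to a point mass. A hedged sketch, assuming the precisions carry a $\tau_j \sim \mathrm{Gamma}(\alpha, \beta)$ prior (the record itself does not show the model):

Along the ray $\alpha, \beta \to \infty$ with $\alpha/\beta = c$ fixed,
\[
  \mathbb{E}[\tau_j] = \frac{\alpha}{\beta} = c,
  \qquad
  \mathrm{Var}[\tau_j] = \frac{\alpha}{\beta^2} = \frac{c}{\alpha} \to 0,
\]
so $\mathrm{Gamma}(\alpha, \beta)$ tends to a point mass at the expected precision $c$. With an improper prior on the rate parameter nothing penalises travel along this ray, which is consistent with the posterior impropriety the author suspects.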
d4cb09e9ffa645c97976c524a3d084172f091a16
p560m/subarray_sum.py
p560m/subarray_sum.py
from typing import List from collections import defaultdict class Solution: def subarraySum(self, nums: List[int], k: int) -> int: sum_count = defaultdict(int) sum_count[0] = 1 s, ans = 0, 0 for n in nums: s += n if s - k in sum_count: ans += sum_count[s - k] sum_count[s] += 1 return ans # TESTS tests = [ ([1], 0, 0), ([1, 1, 1], 2, 2), ([1, 2, 3, 4, 5], 11, 0), ([3, 4, 7, 2, -3, 1, 4, 2], 7, 4), ] for t in tests: sol = Solution() act = sol.subarraySum(t[0], t[1]) print("# of subarrays of", t[0], "sum to", t[1], "=>", act) assert act == t[2]
from typing import List from collections import defaultdict class Solution: def subarraySum(self, nums: List[int], k: int) -> int: sum_count = defaultdict(int) sum_count[0] = 1 s, ans = 0, 0 for n in nums: s += n ans += sum_count[s - k] sum_count[s] += 1 return ans # TESTS tests = [ ([1], 0, 0), ([1, 1, 1], 2, 2), ([1, 2, 3, 4, 5], 11, 0), ([3, 4, 7, 2, -3, 1, 4, 2], 7, 4), ] for t in tests: sol = Solution() act = sol.subarraySum(t[0], t[1]) print("# of subarrays of", t[0], "sum to", t[1], "=>", act) assert act == t[2]
Update p560m subarray sum in Python
Update p560m subarray sum in Python
Python
mit
l33tdaima/l33tdaima,l33tdaima/l33tdaima,l33tdaima/l33tdaima,l33tdaima/l33tdaima,l33tdaima/l33tdaima
from typing import List from collections import defaultdict class Solution: def subarraySum(self, nums: List[int], k: int) -> int: sum_count = defaultdict(int) sum_count[0] = 1 s, ans = 0, 0 for n in nums: s += n ans += sum_count[s - k] sum_count[s] += 1 return ans # TESTS tests = [ ([1], 0, 0), ([1, 1, 1], 2, 2), ([1, 2, 3, 4, 5], 11, 0), ([3, 4, 7, 2, -3, 1, 4, 2], 7, 4), ] for t in tests: sol = Solution() act = sol.subarraySum(t[0], t[1]) print("# of subarrays of", t[0], "sum to", t[1], "=>", act) assert act == t[2]
Update p560m subarray sum in Python from typing import List from collections import defaultdict class Solution: def subarraySum(self, nums: List[int], k: int) -> int: sum_count = defaultdict(int) sum_count[0] = 1 s, ans = 0, 0 for n in nums: s += n if s - k in sum_count: ans += sum_count[s - k] sum_count[s] += 1 return ans # TESTS tests = [ ([1], 0, 0), ([1, 1, 1], 2, 2), ([1, 2, 3, 4, 5], 11, 0), ([3, 4, 7, 2, -3, 1, 4, 2], 7, 4), ] for t in tests: sol = Solution() act = sol.subarraySum(t[0], t[1]) print("# of subarrays of", t[0], "sum to", t[1], "=>", act) assert act == t[2]
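The membership test removed in this record was redundant because defaultdict(int) yields 0 for absent keys; a tiny demonstration, plus the one side effect worth knowing:

from collections import defaultdict

sum_count = defaultdict(int)
sum_count[0] = 1

print(sum_count[42])    # 0 -- so `ans += sum_count[s - k]` is a no-op when unseen
print(42 in sum_count)  # True: reading a missing key also *inserts* it,
                        # harmless for correctness here, but it grows the dict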
c4f51fd3c030f3d88f8545a94698ed4e9f5ef9bc
timpani/webserver/webhelpers.py
timpani/webserver/webhelpers.py
import flask from .. import auth import urllib.parse def checkForSession(): if "uid" in flask.session: session = auth.validateSession(flask.session["uid"]) if session != None: return session return None def redirectAndSave(path): flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path return response def recoverFromRedirect(): donePage = flask.request.cookies["donePage"] response = flask.make_response(flask.redirect(donePage)) response.set_cookie("donePage", "", expires=0) return response def canRecoverFromRedirect(): if "donePage" in flask.session: return flask.session["donePage"] return None
import flask from .. import auth import urllib.parse def checkForSession(): if "uid" in flask.session: session = auth.validateSession(flask.session["uid"]) if session != None: return session return None def redirectAndSave(path): flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path return response def markRedirectAsRecovered(): if "donePage" in flask.session: del flask.session["donePage"] else: raise KeyError("No redirect to be recovered from.") def canRecoverFromRedirect(): if "donePage" in flask.session: return flask.session["donePage"] return None
Remove unneeded recoverFromRedirect and add markRedirectAsRecovered
Remove unneeded recoverFromRedirect and add markRedirectAsRecovered
Python
mit
ollien/Timpani,ollien/Timpani,ollien/Timpani
import flask from .. import auth import urllib.parse def checkForSession(): if "uid" in flask.session: session = auth.validateSession(flask.session["uid"]) if session != None: return session return None def redirectAndSave(path): flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path return response def markRedirectAsRecovered(): if "donePage" in flask.session: del flask.session["donePage"] else: raise KeyError("No redirect to be recovered from.") def canRecoverFromRedirect(): if "donePage" in flask.session: return flask.session["donePage"] return None
Remove unneeded recoverFromRedirect and add markRedirectAsRecovered import flask from .. import auth import urllib.parse def checkForSession(): if "uid" in flask.session: session = auth.validateSession(flask.session["uid"]) if session != None: return session return None def redirectAndSave(path): flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path return response def recoverFromRedirect(): donePage = flask.request.cookies["donePage"] response = flask.make_response(flask.redirect(donePage)) response.set_cookie("donePage", "", expires=0) return response def canRecoverFromRedirect(): if "donePage" in flask.session: return flask.session["donePage"] return None
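Both versions of this file return an undefined name `response` from redirectAndSave, so that helper cannot run as written. A hypothetical completion, assuming a redirect to the given path was intended (names as in the record):

import urllib.parse
import flask

def redirectAndSave(path):
    # Remember where the user was, then send them to `path`.
    flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
    return flask.redirect(path)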
a72a0674a6db3880ed699101be3c9c46671989f0
xxdata_11.py
xxdata_11.py
import os import _xxdata_11 parameters = { 'isdimd' : 200, 'iddimd' : 40, 'itdimd' : 50, 'ndptnl' : 4, 'ndptn' : 128, 'ndptnc' : 256, 'ndcnct' : 100 } def read_scd(filename): fd = open(filename, 'r') fortran_filename = 'fort.%d' % fd.fileno() os.symlink(filename, fortran_filename) iclass = 2 # class number for scd files ret = _xxdata_11.xxdata_11(fd.fileno(), iclass, **parameters) os.unlink(fortran_filename) return ret if __name__ == '__main__': out = read_scd('scd96_c.dat') print out[0]
Add a primitive pythonic wrapper.
Add a primitive pythonic wrapper.
Python
mit
cfe316/atomic,ezekial4/atomic_neu,ezekial4/atomic_neu
import os import _xxdata_11 parameters = { 'isdimd' : 200, 'iddimd' : 40, 'itdimd' : 50, 'ndptnl' : 4, 'ndptn' : 128, 'ndptnc' : 256, 'ndcnct' : 100 } def read_scd(filename): fd = open(filename, 'r') fortran_filename = 'fort.%d' % fd.fileno() os.symlink(filename, fortran_filename) iclass = 2 # class number for scd files ret = _xxdata_11.xxdata_11(fd.fileno(), iclass, **parameters) os.unlink(fortran_filename) return ret if __name__ == '__main__': out = read_scd('scd96_c.dat') print out[0]
Add a primitive pythonic wrapper.
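The symlink dance in this record exists because the wrapped Fortran routine opens files by unit number, which the runtime resolves to a file literally named fort.N in the working directory. A sketch of the same trick wrapped in a context manager so the link is removed even on error (same assumptions as the record):

import contextlib
import os

@contextlib.contextmanager
def fortran_unit(filename, unit):
    link = 'fort.%d' % unit
    os.symlink(filename, link)   # alias the real file to the unit's name
    try:
        yield unit
    finally:
        os.unlink(link)

# usage sketch:
# with open('scd96_c.dat') as fd:
#     with fortran_unit('scd96_c.dat', fd.fileno()) as unit:
#         ret = _xxdata_11.xxdata_11(unit, 2, **parameters)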
c3e2c6f77dffc2ff5874c1bb495e6de119800cf4
rx/core/observable/merge.py
rx/core/observable/merge.py
import rx from rx import operators as ops from rx.core import Observable def _merge(*args) -> Observable: """Merges all the observable sequences into a single observable sequence. 1 - merged = rx.merge(xs, ys, zs) 2 - merged = rx.merge([xs, ys, zs]) Returns: The observable sequence that merges the elements of the observable sequences. """ sources = args[:] if isinstance(sources[0], list): sources = sources[0] return rx.from_iterable(sources).pipe(ops.merge_all())
from typing import Iterable, Union import rx from rx import operators as ops from rx.core import Observable def _merge(*args: Union[Observable, Iterable[Observable]]) -> Observable: """Merges all the observable sequences into a single observable sequence. 1 - merged = rx.merge(xs, ys, zs) 2 - merged = rx.merge([xs, ys, zs]) Returns: The observable sequence that merges the elements of the observable sequences. """ sources = args[:] if isinstance(sources[0], Iterable): sources = sources[0] return rx.from_iterable(sources).pipe(ops.merge_all())
Fix typing and accept iterable instead of list
Fix typing and accept iterable instead of list
Python
mit
ReactiveX/RxPY,ReactiveX/RxPY
from typing import Iterable, Union import rx from rx import operators as ops from rx.core import Observable def _merge(*args: Union[Observable, Iterable[Observable]]) -> Observable: """Merges all the observable sequences into a single observable sequence. 1 - merged = rx.merge(xs, ys, zs) 2 - merged = rx.merge([xs, ys, zs]) Returns: The observable sequence that merges the elements of the observable sequences. """ sources = args[:] if isinstance(sources[0], Iterable): sources = sources[0] return rx.from_iterable(sources).pipe(ops.merge_all())
Fix typing and accept iterable instead of list import rx from rx import operators as ops from rx.core import Observable def _merge(*args) -> Observable: """Merges all the observable sequences into a single observable sequence. 1 - merged = rx.merge(xs, ys, zs) 2 - merged = rx.merge([xs, ys, zs]) Returns: The observable sequence that merges the elements of the observable sequences. """ sources = args[:] if isinstance(sources[0], list): sources = sources[0] return rx.from_iterable(sources).pipe(ops.merge_all())
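isinstance against typing.Iterable works because it forwards to the collections.abc ABC, but newer Pythons deprecate isinstance checks on typing aliases, so the ABC itself is the safer target. A quick check of what the widened test now accepts (illustrative, outside RxPY):

from collections.abc import Iterable  # preferred over typing.Iterable for isinstance

print(isinstance([1, 2], Iterable))                 # True  -- the old list-only case
print(isinstance((x for x in range(2)), Iterable))  # True  -- generators now unwrap too
print(isinstance(3, Iterable))                      # False -- plain args merge as-is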
037c2bc9857fc1feb59f7d4ad3cb81575177e675
src/smsfly/versiontools.py
src/smsfly/versiontools.py
"""Version tools set.""" import os from setuptools_scm import get_version def get_version_from_scm_tag( *, root='.', relative_to=None, local_scheme='node-and-date', ) -> str: """Retrieve the version from SCM tag in Git or Hg.""" try: return get_version( root=root, relative_to=relative_to, local_scheme=local_scheme, ) except LookupError: return 'unknown' def cut_local_version_on_upload(version): """Return empty local version if uploading to PyPI.""" is_pypi_upload = os.getenv('PYPI_UPLOAD') == 'true' if is_pypi_upload: return '' import setuptools_scm.version # only available during setup time return setuptools_scm.version.get_local_node_and_date(version) def get_self_version(): """Calculate the version of the dist itself.""" return get_version_from_scm_tag(local_scheme=cut_local_version_on_upload)
"""Version tools set.""" import os from setuptools_scm import get_version def get_version_from_scm_tag( *, root='.', relative_to=None, local_scheme='node-and-date', ): """Retrieve the version from SCM tag in Git or Hg.""" try: return get_version( root=root, relative_to=relative_to, local_scheme=local_scheme, ) except LookupError: return 'unknown' def cut_local_version_on_upload(version): """Return empty local version if uploading to PyPI.""" is_pypi_upload = os.getenv('PYPI_UPLOAD') == 'true' if is_pypi_upload: return '' import setuptools_scm.version # only available during setup time return setuptools_scm.version.get_local_node_and_date(version) def get_self_version(): """Calculate the version of the dist itself.""" return get_version_from_scm_tag(local_scheme=cut_local_version_on_upload)
Drop func annotations for the sake of Python 3.5
Drop func annotations for the sake of Python 3.5
Python
mit
wk-tech/python-smsfly
"""Version tools set.""" import os from setuptools_scm import get_version def get_version_from_scm_tag( *, root='.', relative_to=None, local_scheme='node-and-date', ): """Retrieve the version from SCM tag in Git or Hg.""" try: return get_version( root=root, relative_to=relative_to, local_scheme=local_scheme, ) except LookupError: return 'unknown' def cut_local_version_on_upload(version): """Return empty local version if uploading to PyPI.""" is_pypi_upload = os.getenv('PYPI_UPLOAD') == 'true' if is_pypi_upload: return '' import setuptools_scm.version # only available during setup time return setuptools_scm.version.get_local_node_and_date(version) def get_self_version(): """Calculate the version of the dist itself.""" return get_version_from_scm_tag(local_scheme=cut_local_version_on_upload)
Drop func annotations for the sake of Python 3.5 """Version tools set.""" import os from setuptools_scm import get_version def get_version_from_scm_tag( *, root='.', relative_to=None, local_scheme='node-and-date', ) -> str: """Retrieve the version from SCM tag in Git or Hg.""" try: return get_version( root=root, relative_to=relative_to, local_scheme=local_scheme, ) except LookupError: return 'unknown' def cut_local_version_on_upload(version): """Return empty local version if uploading to PyPI.""" is_pypi_upload = os.getenv('PYPI_UPLOAD') == 'true' if is_pypi_upload: return '' import setuptools_scm.version # only available during setup time return setuptools_scm.version.get_local_node_and_date(version) def get_self_version(): """Calculate the version of the dist itself.""" return get_version_from_scm_tag(local_scheme=cut_local_version_on_upload)
16a36338fecb21fb3e9e6a15a7af1a438da48c79
apps/jobs/migrations/0003_jobs_per_page.py
apps/jobs/migrations/0003_jobs_per_page.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('jobs', '0002_auto_20140925_1117'), ] operations = [ migrations.AddField( model_name='jobs', name='per_page', field=models.IntegerField(default=5, null=True, verbose_name=b'jobs per page', blank=True), preserve_default=True, ), ]
Add missing `per_page` migration file.
Add missing `per_page` migration file.
Python
mit
onespacemedia/cms-jobs,onespacemedia/cms-jobs
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('jobs', '0002_auto_20140925_1117'), ] operations = [ migrations.AddField( model_name='jobs', name='per_page', field=models.IntegerField(default=5, null=True, verbose_name=b'jobs per page', blank=True), preserve_default=True, ), ]
Add missing `per_page` migration file.
02b67810263ac5a39882a1e12a78ba28249dbc0a
webapp/config/settings/development.py
webapp/config/settings/development.py
from .base import * DEBUG = True # TEMPLATES[1]['DEBUG'] = True DATABASES = { 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': os.path.join(BASE_DIR, '..', 'tmp', 'db.sqlite3'), 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'compass_webapp_dev', 'USER': 'compass_webapp', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '5432', } } DEVELOPMENT_APPS = [ 'debug_toolbar', ] INSTALLED_APPS += DEVELOPMENT_APPS
from .base import * DEBUG = True # TEMPLATES[1]['DEBUG'] = True DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'compass_webapp_dev', 'USER': 'compass_webapp', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '5432', } } DEVELOPMENT_APPS = [ 'debug_toolbar', ] INSTALLED_APPS += DEVELOPMENT_APPS
Remove sql comments from settings file
Remove sql comments from settings file
Python
apache-2.0
patrickspencer/compass-python,patrickspencer/compass,patrickspencer/compass-python,patrickspencer/compass-python,patrickspencer/compass-python,patrickspencer/compass-python,patrickspencer/compass,patrickspencer/compass,patrickspencer/compass,patrickspencer/compass
from .base import * DEBUG = True # TEMPLATES[1]['DEBUG'] = True DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'compass_webapp_dev', 'USER': 'compass_webapp', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '5432', } } DEVELOPMENT_APPS = [ 'debug_toolbar', ] INSTALLED_APPS += DEVELOPMENT_APPS
Remove sql comments from settings file from .base import * DEBUG = True # TEMPLATES[1]['DEBUG'] = True DATABASES = { 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': os.path.join(BASE_DIR, '..', 'tmp', 'db.sqlite3'), 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'compass_webapp_dev', 'USER': 'compass_webapp', 'PASSWORD': 'password', 'HOST': 'localhost', 'PORT': '5432', } } DEVELOPMENT_APPS = [ 'debug_toolbar', ] INSTALLED_APPS += DEVELOPMENT_APPS
ff476b33c26a9067e6ac64b2c161d29b0febea33
py/capnptools/examples/tests/test_books.py
py/capnptools/examples/tests/test_books.py
import unittest from examples import books class BooksTest(unittest.TestCase): def test_builder(self): book = books.MallocMessageBuilder().init_root(books.Book) book.title = 'Moby-Dick; or, The Whale' book.authors = ['Herman Melville'] self.assertEqual( { 'title': 'Moby-Dick; or, The Whale', 'authors': ['Herman Melville'], }, book._as_dict(), ) book = book._as_reader() self.assertEqual('Moby-Dick; or, The Whale', book.title) self.assertEqual(['Herman Melville'], book.authors._as_dict()) self.assertEqual( { 'title': 'Moby-Dick; or, The Whale', 'authors': ['Herman Melville'], }, book._as_dict(), ) if __name__ == '__main__': unittest.main()
import unittest import os import tempfile from examples import books class BooksTest(unittest.TestCase): BOOK = { 'title': 'Moby-Dick; or, The Whale', 'authors': ['Herman Melville'], } def test_builder(self): book = books.MallocMessageBuilder().init_root(books.Book) book.title = self.BOOK['title'] book.authors = self.BOOK['authors'] self.assertEqual(self.BOOK, book._as_dict()) book = book._as_reader() self.assertEqual(self.BOOK['title'], book.title) self.assertEqual(self.BOOK['authors'], book.authors._as_dict()) self.assertEqual(self.BOOK, book._as_dict()) def test_write(self): builder = books.MallocMessageBuilder() book = builder.init_root(books.Book) book.title = self.BOOK['title'] book.authors = self.BOOK['authors'] for read_cls, write_func in [ ('StreamFdMessageReader', 'write_to'), ('PackedFdMessageReader', 'write_packed_to')]: with self.subTest(read_cls=read_cls, write_func=write_func): fd, path = tempfile.mkstemp() try: getattr(builder, write_func)(fd) os.close(fd) fd = os.open(path, os.O_RDONLY) reader = getattr(books, read_cls)(fd) book = reader.get_root(books.Book) self.assertEqual(self.BOOK, book._as_dict()) finally: os.unlink(path) os.close(fd) if __name__ == '__main__': unittest.main()
Add unit tests for write_to and write_packed_to
Add unit tests for write_to and write_packed_to
Python
mit
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
import unittest import os import tempfile from examples import books class BooksTest(unittest.TestCase): BOOK = { 'title': 'Moby-Dick; or, The Whale', 'authors': ['Herman Melville'], } def test_builder(self): book = books.MallocMessageBuilder().init_root(books.Book) book.title = self.BOOK['title'] book.authors = self.BOOK['authors'] self.assertEqual(self.BOOK, book._as_dict()) book = book._as_reader() self.assertEqual(self.BOOK['title'], book.title) self.assertEqual(self.BOOK['authors'], book.authors._as_dict()) self.assertEqual(self.BOOK, book._as_dict()) def test_write(self): builder = books.MallocMessageBuilder() book = builder.init_root(books.Book) book.title = self.BOOK['title'] book.authors = self.BOOK['authors'] for read_cls, write_func in [ ('StreamFdMessageReader', 'write_to'), ('PackedFdMessageReader', 'write_packed_to')]: with self.subTest(read_cls=read_cls, write_func=write_func): fd, path = tempfile.mkstemp() try: getattr(builder, write_func)(fd) os.close(fd) fd = os.open(path, os.O_RDONLY) reader = getattr(books, read_cls)(fd) book = reader.get_root(books.Book) self.assertEqual(self.BOOK, book._as_dict()) finally: os.unlink(path) os.close(fd) if __name__ == '__main__': unittest.main()
Add unit tests for write_to and write_packed_to import unittest from examples import books class BooksTest(unittest.TestCase): def test_builder(self): book = books.MallocMessageBuilder().init_root(books.Book) book.title = 'Moby-Dick; or, The Whale' book.authors = ['Herman Melville'] self.assertEqual( { 'title': 'Moby-Dick; or, The Whale', 'authors': ['Herman Melville'], }, book._as_dict(), ) book = book._as_reader() self.assertEqual('Moby-Dick; or, The Whale', book.title) self.assertEqual(['Herman Melville'], book.authors._as_dict()) self.assertEqual( { 'title': 'Moby-Dick; or, The Whale', 'authors': ['Herman Melville'], }, book._as_dict(), ) if __name__ == '__main__': unittest.main()
f9e1c2bd5976623bcebbb4b57fb011eb4d1737bc
support/appveyor-build.py
support/appveyor-build.py
#!/usr/bin/env python # Build the project on AppVeyor. import os from download import Downloader from subprocess import check_call build = os.environ['BUILD'] cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + os.environ['CONFIG']] build_command = ['msbuild', '/m:4', '/p:Config=' + os.environ['CONFIG'], 'FORMAT.sln'] test_command = ['msbuild', 'RUN_TESTS.vcxproj'] if build == 'mingw': # Install MinGW. mingw_url = 'http://ufpr.dl.sourceforge.net/project/mingw-w64/' + \ 'Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/' + \ '4.9.2/threads-win32/seh/x86_64-4.9.2-release-win32-seh-rt_v3-rev1.7z' with Downloader().download(mingw_url) as f: check_call(['7z', 'x', '-oC:\\', f]) # Remove path to Git bin directory from $PATH because it breaks MinGW config. path = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '') os.environ['PATH'] = r'C:\Program Files (x86)\MSBUILD\12.0\bin\;' + path + r';C:\mingw64\bin' cmake_command.append('-GMinGW Makefiles') build_command = ['mingw32-make', '-j4'] test_command = ['mingw32-make', 'test'] check_call(cmake_command) check_call(build_command) check_call(test_command)
#!/usr/bin/env python # Build the project on AppVeyor. import os from subprocess import check_call build = os.environ['BUILD'] config = os.environ['CONFIG'] cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config] if build == 'mingw': cmake_command.append('-GMinGW Makefiles') build_command = ['mingw32-make', '-j4'] test_command = ['mingw32-make', 'test'] else: build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln'] test_command = ['msbuild', 'RUN_TESTS.vcxproj'] check_call(cmake_command) check_call(build_command) check_call(test_command)
Use preinstalled mingw on appveyor
Use preinstalled mingw on appveyor
Python
bsd-2-clause
lightslife/cppformat,blaquee/cppformat,mojoBrendan/fmt,alabuzhev/fmt,alabuzhev/fmt,wangshijin/cppformat,nelson4722/cppformat,cppformat/cppformat,cppformat/cppformat,alabuzhev/fmt,blaquee/cppformat,wangshijin/cppformat,Jopie64/cppformat,nelson4722/cppformat,lightslife/cppformat,cppformat/cppformat,seungrye/cppformat,Jopie64/cppformat,nelson4722/cppformat,dean0x7d/cppformat,seungrye/cppformat,lightslife/cppformat,dean0x7d/cppformat,mojoBrendan/fmt,wangshijin/cppformat,blaquee/cppformat,seungrye/cppformat,Jopie64/cppformat,mojoBrendan/fmt,dean0x7d/cppformat
#!/usr/bin/env python # Build the project on AppVeyor. import os from subprocess import check_call build = os.environ['BUILD'] config = os.environ['CONFIG'] cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config] if build == 'mingw': cmake_command.append('-GMinGW Makefiles') build_command = ['mingw32-make', '-j4'] test_command = ['mingw32-make', 'test'] else: build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln'] test_command = ['msbuild', 'RUN_TESTS.vcxproj'] check_call(cmake_command) check_call(build_command) check_call(test_command)
Use preinstalled mingw on appveyor #!/usr/bin/env python # Build the project on AppVeyor. import os from download import Downloader from subprocess import check_call build = os.environ['BUILD'] cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + os.environ['CONFIG']] build_command = ['msbuild', '/m:4', '/p:Config=' + os.environ['CONFIG'], 'FORMAT.sln'] test_command = ['msbuild', 'RUN_TESTS.vcxproj'] if build == 'mingw': # Install MinGW. mingw_url = 'http://ufpr.dl.sourceforge.net/project/mingw-w64/' + \ 'Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/' + \ '4.9.2/threads-win32/seh/x86_64-4.9.2-release-win32-seh-rt_v3-rev1.7z' with Downloader().download(mingw_url) as f: check_call(['7z', 'x', '-oC:\\', f]) # Remove path to Git bin directory from $PATH because it breaks MinGW config. path = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '') os.environ['PATH'] = r'C:\Program Files (x86)\MSBUILD\12.0\bin\;' + path + r';C:\mingw64\bin' cmake_command.append('-GMinGW Makefiles') build_command = ['mingw32-make', '-j4'] test_command = ['mingw32-make', 'test'] check_call(cmake_command) check_call(build_command) check_call(test_command)
d190a376442dd5e9516b7bef802369a5fe318f03
find_primes.py
find_primes.py
#!/usr/bin/env python2 def find_primes(limit): primes = [] for candidate in range(2, limit + 1): candidate_ok = True for divisor in range(2, candidate): if candidate % divisor == 0: candidate_ok = False break if candidate_ok: primes.append(candidate) return primes if __name__ == '__main__': import sys limit = int(sys.argv[1]) print find_primes(limit)
Implement the 'trial division' algorithm.
Implement the 'trial division' algorithm.
Python
mit
ipqb/bootcamp-primes-activity
#!/usr/bin/env python2 def find_primes(limit): primes = [] for candidate in range(2, limit + 1): candidate_ok = True for divisor in range(2, candidate): if candidate % divisor == 0: candidate_ok = False break if candidate_ok: primes.append(candidate) return primes if __name__ == '__main__': import sys limit = int(sys.argv[1]) print find_primes(limit)
Implement the 'trial division' algorithm.
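The trial-division loop above can stop once the divisor exceeds the square root of the candidate, since any larger factor implies a smaller cofactor that was already tested. A minimal sketch of that refinement (not part of the commit; same results, fewer divisions):

# Variant of find_primes above that bounds divisors by sqrt(candidate);
# the for/else appends only when no divisor was found.
def find_primes(limit):
    primes = []
    for candidate in range(2, limit + 1):
        for divisor in range(2, int(candidate ** 0.5) + 1):
            if candidate % divisor == 0:
                break
        else:
            primes.append(candidate)
    return primes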
a3638f641098b1e713492d1a5fd832c8f9c3da5d
resolwe/flow/migrations/0005_duplicate_data_dependency.py
resolwe/flow/migrations/0005_duplicate_data_dependency.py
# Generated by Django 3.1.7 on 2021-10-12 10:39 from django.db import migrations, models def create_duplicate_dependencies(apps, schema_editor): Data = apps.get_model("flow", "Data") DataDependency = apps.get_model("flow", "DataDependency") duplicates = Data.objects.filter(duplicated__isnull=False) duplicates_without_relation = duplicates.exclude( parents_dependency__kind="duplicate" ).annotate( parent_id=models.Subquery( Data.objects.filter( location_id=models.OuterRef("location_id"), duplicated__isnull=True ).values("id") ) ) DataDependency.objects.bulk_create( DataDependency(kind="duplicate", parent_id=duplicate.parent_id, child=duplicate) for duplicate in duplicates_without_relation ) class Migration(migrations.Migration): dependencies = [ ("flow", "0004_data_process_resources"), ] operations = [ migrations.RunPython(create_duplicate_dependencies), ]
Add missing DataDependency objects for duplicates
Add missing DataDependency objects for duplicates
Python
apache-2.0
genialis/resolwe,genialis/resolwe
# Generated by Django 3.1.7 on 2021-10-12 10:39 from django.db import migrations, models def create_duplicate_dependencies(apps, schema_editor): Data = apps.get_model("flow", "Data") DataDependency = apps.get_model("flow", "DataDependency") duplicates = Data.objects.filter(duplicated__isnull=False) duplicates_without_relation = duplicates.exclude( parents_dependency__kind="duplicate" ).annotate( parent_id=models.Subquery( Data.objects.filter( location_id=models.OuterRef("location_id"), duplicated__isnull=True ).values("id") ) ) DataDependency.objects.bulk_create( DataDependency(kind="duplicate", parent_id=duplicate.parent_id, child=duplicate) for duplicate in duplicates_without_relation ) class Migration(migrations.Migration): dependencies = [ ("flow", "0004_data_process_resources"), ] operations = [ migrations.RunPython(create_duplicate_dependencies), ]
Add missing DataDependency objects for duplicates
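A common companion to a forward-only data migration like the one above, though not part of this record, is an explicit reverse callable so the migration can be unapplied; Django ships RunPython.noop for exactly that:

# Hypothetical variant of the operations list above; RunPython.noop is a
# standard Django helper that makes rolling back this migration a no-op.
operations = [
    migrations.RunPython(create_duplicate_dependencies, migrations.RunPython.noop),
]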
60951f30d8b5e2a450c13aa2b146be14ceb53c4d
rolldembones.py
rolldembones.py
#!/usr/bin/python import argparse import dice def main(): roller = dice.Roller(args) for repeat in range(args.repeats): roller.do_roll() for result in roller: if isinstance(result, list): print(' '.join(map(str, result))) else: print(result) if __name__ == '__main__': parser = argparse.ArgumentParser(description="Roll some dice.") parser.add_argument("-r, --repeat", dest="repeats", type=int, metavar="N", default=1, help="Repeat these rolls N times.") parser.add_argument("-e, --explode", dest="explode", metavar="E", type=int, default=None, help="Any die whose roll matches or exceeds E is counted and rolled again. Set to 1 or lower to disable this behavior on special dice.") parser.add_argument("dice", nargs='*', help="Dice to roll, given in pairs of the number of dice to roll, and the sides those dice have.") args = parser.parse_args() # some basic error checking if len(args.dice)%2 != 0: parser.error("Incorrect number of arguments: Rolls and faces must be paired") main()
#!/usr/bin/python3 import argparse import dice def main(): roller = dice.Roller(args) for repeat in range(args.repeats): roller.do_roll() for result in roller: if isinstance(result, list): print(' '.join(map(str, result))) else: print(result) if __name__ == '__main__': parser = argparse.ArgumentParser(description="Roll some dice.") parser.add_argument("-r, --repeat", dest="repeats", type=int, metavar="N", default=1, help="Repeat these rolls N times.") parser.add_argument("-e, --explode", dest="explode", metavar="E", type=int, default=None, help="Any die whose roll matches or exceeds E is counted and rolled again. Set to 1 or lower to disable this behavior on special dice.") parser.add_argument("dice", nargs='*', help="Dice to roll, given in pairs of the number of dice to roll, and the sides those dice have.") args = parser.parse_args() # some basic error checking if len(args.dice)%2 != 0: parser.error("Incorrect number of arguments: Rolls and faces must be paired") main()
Update shebang to request python 3
Update shebang to request python 3
Python
mit
aurule/rolldembones
#!/usr/bin/python3 import argparse import dice def main(): roller = dice.Roller(args) for repeat in range(args.repeats): roller.do_roll() for result in roller: if isinstance(result, list): print(' '.join(map(str, result))) else: print(result) if __name__ == '__main__': parser = argparse.ArgumentParser(description="Roll some dice.") parser.add_argument("-r, --repeat", dest="repeats", type=int, metavar="N", default=1, help="Repeat these rolls N times.") parser.add_argument("-e, --explode", dest="explode", metavar="E", type=int, default=None, help="Any die whose roll matches or exceeds E is counted and rolled again. Set to 1 or lower to disable this behavior on special dice.") parser.add_argument("dice", nargs='*', help="Dice to roll, given in pairs of the number of dice to roll, and the sides those dice have.") args = parser.parse_args() # some basic error checking if len(args.dice)%2 != 0: parser.error("Incorrect number of arguments: Rolls and faces must be paired") main()
Update shebang to request python 3 #!/usr/bin/python import argparse import dice def main(): roller = dice.Roller(args) for repeat in range(args.repeats): roller.do_roll() for result in roller: if isinstance(result, list): print(' '.join(map(str, result))) else: print(result) if __name__ == '__main__': parser = argparse.ArgumentParser(description="Roll some dice.") parser.add_argument("-r, --repeat", dest="repeats", type=int, metavar="N", default=1, help="Repeat these rolls N times.") parser.add_argument("-e, --explode", dest="explode", metavar="E", type=int, default=None, help="Any die whose roll matches or exceeds E is counted and rolled again. Set to 1 or lower to disable this behavior on special dice.") parser.add_argument("dice", nargs='*', help="Dice to roll, given in pairs of the number of dice to roll, and the sides those dice have.") args = parser.parse_args() # some basic error checking if len(args.dice)%2 != 0: parser.error("Incorrect number of arguments: Rolls and faces must be paired") main()
8a2979ae72bcd691521e2694c974219edfe5dc3b
altair/examples/top_k_with_others.py
altair/examples/top_k_with_others.py
""" Top-K plot with Others ---------------------- This example shows how to use aggregate, window, and calculate transfromations to display the top-k directors by average worldwide gross while grouping the remaining directors as 'All Others'. """ # category: case studies import altair as alt from vega_datasets import data source = data.movies() alt.Chart(source).mark_bar().encode( x=alt.X("aggregate_gross:Q", aggregate="mean", title=None), y=alt.Y( "ranked_director:N", sort=alt.Sort(op="mean", field="aggregate_gross", order="descending"), title=None, ), ).transform_aggregate( aggregate=[ alt.AggregatedFieldDef( **{"as": "aggregate_gross", "op": "mean", "field": "Worldwide_Gross"} ) ], groupby=["Director"], ).transform_window( window=[alt.WindowFieldDef(**{"as": "rank", "op": "row_number"})], sort=[alt.SortField("aggregate_gross", order="descending")], ).transform_calculate( as_="ranked_director", calculate="datum.rank < 10 ? datum.Director : 'All Others'" ).properties( title="Top Directors by Average Worldwide Gross", )
Add example for Top-K with Others.
DOC: Add example for Top-K with Others.
Python
bsd-3-clause
altair-viz/altair,jakevdp/altair
""" Top-K plot with Others ---------------------- This example shows how to use aggregate, window, and calculate transfromations to display the top-k directors by average worldwide gross while grouping the remaining directors as 'All Others'. """ # category: case studies import altair as alt from vega_datasets import data source = data.movies() alt.Chart(source).mark_bar().encode( x=alt.X("aggregate_gross:Q", aggregate="mean", title=None), y=alt.Y( "ranked_director:N", sort=alt.Sort(op="mean", field="aggregate_gross", order="descending"), title=None, ), ).transform_aggregate( aggregate=[ alt.AggregatedFieldDef( **{"as": "aggregate_gross", "op": "mean", "field": "Worldwide_Gross"} ) ], groupby=["Director"], ).transform_window( window=[alt.WindowFieldDef(**{"as": "rank", "op": "row_number"})], sort=[alt.SortField("aggregate_gross", order="descending")], ).transform_calculate( as_="ranked_director", calculate="datum.rank < 10 ? datum.Director : 'All Others'" ).properties( title="Top Directors by Average Worldwide Gross", )
DOC: Add example for Top-K with Others.
ab14f4c86fca6daab9d67cc9b4c3581d76d5635a
foster/utils.py
foster/utils.py
import os.path import shutil from string import Template PIKE_DIR = os.path.dirname(__file__) SAMPLES_DIR = os.path.join(PIKE_DIR, 'samples') def sample_path(sample): path = os.path.join(SAMPLES_DIR, sample) return os.path.realpath(path) def copy_sample(sample, target): source = os.path.join(SAMPLES_DIR, sample) shutil.copy(source, target) def render_sample(sample, **kwargs): source = os.path.join(SAMPLES_DIR, sample) with open(source, 'r') as f: text = f.read() template = Template(text) return template.substitute(kwargs)
import os.path import shutil from string import Template PIKE_DIR = os.path.dirname(__file__) SAMPLES_DIR = os.path.join(PIKE_DIR, 'samples') def sample_path(sample): path = os.path.join(SAMPLES_DIR, sample) return os.path.realpath(path) def copy_sample(sample, target): source = os.path.join(SAMPLES_DIR, sample) shutil.copy(source, target) def render_sample(sample, **kwargs): source = os.path.join(SAMPLES_DIR, sample) with open(source, 'r') as f: text = f.read() template = Template(text) return template.substitute(kwargs)
Fix whitespace in foster/util.py to better comply with PEP8
Fix whitespace in foster/util.py to better comply with PEP8
Python
mit
hugollm/foster,hugollm/foster
import os.path import shutil from string import Template PIKE_DIR = os.path.dirname(__file__) SAMPLES_DIR = os.path.join(PIKE_DIR, 'samples') def sample_path(sample): path = os.path.join(SAMPLES_DIR, sample) return os.path.realpath(path) def copy_sample(sample, target): source = os.path.join(SAMPLES_DIR, sample) shutil.copy(source, target) def render_sample(sample, **kwargs): source = os.path.join(SAMPLES_DIR, sample) with open(source, 'r') as f: text = f.read() template = Template(text) return template.substitute(kwargs)
Fix whitespace in foster/util.py to better comply with PEP8 import os.path import shutil from string import Template PIKE_DIR = os.path.dirname(__file__) SAMPLES_DIR = os.path.join(PIKE_DIR, 'samples') def sample_path(sample): path = os.path.join(SAMPLES_DIR, sample) return os.path.realpath(path) def copy_sample(sample, target): source = os.path.join(SAMPLES_DIR, sample) shutil.copy(source, target) def render_sample(sample, **kwargs): source = os.path.join(SAMPLES_DIR, sample) with open(source, 'r') as f: text = f.read() template = Template(text) return template.substitute(kwargs)
84b31bb02746dec1667cc93a189c6e1c40ffac28
studygroups/migrations/0015_auto_20150430_0126.py
studygroups/migrations/0015_auto_20150430_0126.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('studygroups', '0014_application_accepted_at'), ] operations = [ migrations.AlterUniqueTogether( name='studygroupsignup', unique_together=set([]), ), ]
Remove unique requirement for study group applications
Remove unique requirement for study group applications
Python
mit
p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('studygroups', '0014_application_accepted_at'), ] operations = [ migrations.AlterUniqueTogether( name='studygroupsignup', unique_together=set([]), ), ]
Remove unique requirement for study group applications
5041862eafcd4b8799f8ab97c25df7d494d6c2ad
blockbuster/bb_logging.py
blockbuster/bb_logging.py
import config import logging import logging.handlers # ######### Set up logging ########## # log.basicConfig(format="%(asctime)s - %(levelname)s: %(message)s", level=log.DEBUG) logger = logging.getLogger("blockbuster") logger.setLevel(logging.DEBUG) # create file handler which logs even debug messages tfh = logging.handlers.TimedRotatingFileHandler(str.format('{0}/app.log', config.log_directory), when='midnight', delay=False, encoding=None, backupCount=7) tfh.setLevel(logging.DEBUG) # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) # create formatter and add it to the handlers formatterch = logging.Formatter('%(asctime)s %(levelname)s %(message)s') formattertfh = logging.Formatter('%(asctime)s %(levelname)s [%(name)s] %(message)s') ch.setFormatter(formatterch) tfh.setFormatter(formattertfh) # add the handlers to logger logger.addHandler(ch) logger.addHandler(tfh)
import config import logging import logging.handlers # ######### Set up logging ########## # log.basicConfig(format="%(asctime)s - %(levelname)s: %(message)s", level=log.DEBUG) logger = logging.getLogger("blockbuster") logger.setLevel(logging.DEBUG) # create file handler which logs even debug messages tfh = logging.handlers.TimedRotatingFileHandler(str.format('{0}/app.log', config.log_directory), when='midnight', delay=False, encoding=None, backupCount=7) tfh.setLevel(logging.DEBUG) # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) # create formatter and add it to the handlers formatterch = logging.Formatter('%(asctime)s %(levelname)s [%(name)s] %(message)s') formattertfh = logging.Formatter('%(asctime)s %(levelname)s [%(name)s] %(message)s') ch.setFormatter(formatterch) tfh.setFormatter(formattertfh) # add the handlers to logger logger.addHandler(ch) logger.addHandler(tfh)
Change format of log lines
Change format of log lines
Python
mit
mattstibbs/blockbuster-server,mattstibbs/blockbuster-server
import config import logging import logging.handlers # ######### Set up logging ########## # log.basicConfig(format="%(asctime)s - %(levelname)s: %(message)s", level=log.DEBUG) logger = logging.getLogger("blockbuster") logger.setLevel(logging.DEBUG) # create file handler which logs even debug messages tfh = logging.handlers.TimedRotatingFileHandler(str.format('{0}/app.log', config.log_directory), when='midnight', delay=False, encoding=None, backupCount=7) tfh.setLevel(logging.DEBUG) # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) # create formatter and add it to the handlers formatterch = logging.Formatter('%(asctime)s %(levelname)s [%(name)s] %(message)s') formattertfh = logging.Formatter('%(asctime)s %(levelname)s [%(name)s] %(message)s') ch.setFormatter(formatterch) tfh.setFormatter(formattertfh) # add the handlers to logger logger.addHandler(ch) logger.addHandler(tfh)
Change format of log lines import config import logging import logging.handlers # ######### Set up logging ########## # log.basicConfig(format="%(asctime)s - %(levelname)s: %(message)s", level=log.DEBUG) logger = logging.getLogger("blockbuster") logger.setLevel(logging.DEBUG) # create file handler which logs even debug messages tfh = logging.handlers.TimedRotatingFileHandler(str.format('{0}/app.log', config.log_directory), when='midnight', delay=False, encoding=None, backupCount=7) tfh.setLevel(logging.DEBUG) # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) # create formatter and add it to the handlers formatterch = logging.Formatter('%(asctime)s %(levelname)s %(message)s') formattertfh = logging.Formatter('%(asctime)s %(levelname)s [%(name)s] %(message)s') ch.setFormatter(formatterch) tfh.setFormatter(formattertfh) # add the handlers to logger logger.addHandler(ch) logger.addHandler(tfh)
34a3bf209c1bb09e2057eb4dd91ef426e3107c11
monitor_temperature.py
monitor_temperature.py
import time import serial import matplotlib.pyplot as plt import csv import os import brewkettle reload(brewkettle) filename = time.strftime("%Y-%m-%d %H:%M") + ".csv" path = os.path.join("data", filename) f = open(path, "w") csv_writer = csv.writer(f) kettle = brewkettle.BrewKettle() kettle.turn_pump_on() start = time.time() previous = start while(True): try: now = time.time() if (now - previous > 2): temperature = kettle.get_temperature() current = now - start print "Time:\t\t" + str(current) print "Temperature:\t" + str(temperature) csv_writer.writerow((current, temperature)) previous = now except KeyboardInterrupt: f.close() kettle.exit() print "Done" break
import time import serial import matplotlib.pyplot as plt import csv import os import brewkettle reload(brewkettle) filename = time.strftime("%Y-%m-%d %H:%M") + ".csv" path = os.path.join("data", filename) f = open(path, "w") csv_writer = csv.writer(f) csv_writer.writerow(["Time [s]", "Temperature [C]"]) kettle = brewkettle.BrewKettle() kettle.turn_pump_on() kettle.turn_heater_on() start = time.time() previous = 0 while(True): try: now = time.time() if (now - previous > 10): temperature = kettle.get_temperature() current = now - start print "Time:\t\t" + str(current) print "Temperature:\t" + str(temperature) csv_writer.writerow((current, temperature)) previous = now except KeyboardInterrupt: f.close() kettle.exit() print "Done" break
Monitor temperature script as used for heating measurement
Monitor temperature script as used for heating measurement
Python
mit
beercanlah/ardumashtun,beercanlah/ardumashtun
import time import serial import matplotlib.pyplot as plt import csv import os import brewkettle reload(brewkettle) filename = time.strftime("%Y-%m-%d %H:%M") + ".csv" path = os.path.join("data", filename) f = open(path, "w") csv_writer = csv.writer(f) csv_writer.writerow(["Time [s]", "Temperature [C]"]) kettle = brewkettle.BrewKettle() kettle.turn_pump_on() kettle.turn_heater_on() start = time.time() previous = 0 while(True): try: now = time.time() if (now - previous > 10): temperature = kettle.get_temperature() current = now - start print "Time:\t\t" + str(current) print "Temperature:\t" + str(temperature) csv_writer.writerow((current, temperature)) previous = now except KeyboardInterrupt: f.close() kettle.exit() print "Done" break
Monitor temperature script as used for heating measurement import time import serial import matplotlib.pyplot as plt import csv import os import brewkettle reload(brewkettle) filename = time.strftime("%Y-%m-%d %H:%M") + ".csv" path = os.path.join("data", filename) f = open(path, "w") csv_writer = csv.writer(f) kettle = brewkettle.BrewKettle() kettle.turn_pump_on() start = time.time() previous = start while(True): try: now = time.time() if (now - previous > 2): temperature = kettle.get_temperature() current = now - start print "Time:\t\t" + str(current) print "Temperature:\t" + str(temperature) csv_writer.writerow((current, temperature)) previous = now except KeyboardInterrupt: f.close() kettle.exit() print "Done" break
2758c1086e06a77f9676d678a3d41a53a352ec01
testfixtures/seating.py
testfixtures/seating.py
# -*- coding: utf-8 -*- """ testfixtures.seating ~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2016 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from byceps.services.seating.models.seat_group import SeatGroup def create_seat_group(party_id, seat_category, title, *, seat_quantity=4): return SeatGroup(party_id, seat_category, seat_quantity, title)
# -*- coding: utf-8 -*-

"""
testfixtures.seating
~~~~~~~~~~~~~~~~~~~~

:Copyright: 2006-2016 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""

from byceps.services.seating.models.category import Category
from byceps.services.seating.models.seat_group import SeatGroup


def create_seat_category(party_id, title):
    return Category(party_id, title)


def create_seat_group(party_id, seat_category, title, *, seat_quantity=4):
    return SeatGroup(party_id, seat_category, seat_quantity, title)
Add function to create a seat category test fixture
Add function to create a seat category test fixture
Python
bsd-3-clause
m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps
# -*- coding: utf-8 -*-

"""
testfixtures.seating
~~~~~~~~~~~~~~~~~~~~

:Copyright: 2006-2016 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""

from byceps.services.seating.models.category import Category
from byceps.services.seating.models.seat_group import SeatGroup


def create_seat_category(party_id, title):
    return Category(party_id, title)


def create_seat_group(party_id, seat_category, title, *, seat_quantity=4):
    return SeatGroup(party_id, seat_category, seat_quantity, title)
Add function to create a seat category test fixture # -*- coding: utf-8 -*- """ testfixtures.seating ~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2016 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from byceps.services.seating.models.seat_group import SeatGroup def create_seat_group(party_id, seat_category, title, *, seat_quantity=4): return SeatGroup(party_id, seat_category, seat_quantity, title)
55a8921f3634fe842eddf202d1237f53ca6d003b
kobo/settings/dev.py
kobo/settings/dev.py
# coding: utf-8 from .base import * LOGGING['handlers']['console'] = { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose' } INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar',) MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware') # Comment out the line below to use `Django Debug Toolbar` # INTERNAL_IPS = ['172.28.0.4'] # Change IP to KPI container's IP ENV = 'dev' # Expiration time in sec. after which paired data xml file must be regenerated # Does not need to match KoBoCAT setting PAIRED_DATA_EXPIRATION = 5 # Minimum size (in bytes) of files to allow fast calculation of hashes # Should match KoBoCAT setting HASH_BIG_FILE_SIZE_THRESHOLD = 200 * 1024 # 200 kB # Chunk size in bytes to read per iteration when hash of a file is calculated # Should match KoBoCAT setting HASH_BIG_FILE_CHUNK = 5 * 1024 # 5 kB
# coding: utf-8 from .base import * LOGGING['handlers']['console'] = { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose' } INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar',) MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware') def show_toolbar(request): return env.bool("DEBUG_TOOLBAR", False) DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": show_toolbar} ENV = 'dev' # Expiration time in sec. after which paired data xml file must be regenerated # Does not need to match KoBoCAT setting PAIRED_DATA_EXPIRATION = 5 # Minimum size (in bytes) of files to allow fast calculation of hashes # Should match KoBoCAT setting HASH_BIG_FILE_SIZE_THRESHOLD = 200 * 1024 # 200 kB # Chunk size in bytes to read per iteration when hash of a file is calculated # Should match KoBoCAT setting HASH_BIG_FILE_CHUNK = 5 * 1024 # 5 kB
Enable django debug toolbar via env var
Enable django debug toolbar via env var
Python
agpl-3.0
kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi
# coding: utf-8 from .base import * LOGGING['handlers']['console'] = { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose' } INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar',) MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware') def show_toolbar(request): return env.bool("DEBUG_TOOLBAR", False) DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": show_toolbar} ENV = 'dev' # Expiration time in sec. after which paired data xml file must be regenerated # Does not need to match KoBoCAT setting PAIRED_DATA_EXPIRATION = 5 # Minimum size (in bytes) of files to allow fast calculation of hashes # Should match KoBoCAT setting HASH_BIG_FILE_SIZE_THRESHOLD = 200 * 1024 # 200 kB # Chunk size in bytes to read per iteration when hash of a file is calculated # Should match KoBoCAT setting HASH_BIG_FILE_CHUNK = 5 * 1024 # 5 kB
Enable django debug toolbar via env var # coding: utf-8 from .base import * LOGGING['handlers']['console'] = { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose' } INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar',) MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware') # Comment out the line below to use `Django Debug Toolbar` # INTERNAL_IPS = ['172.28.0.4'] # Change IP to KPI container's IP ENV = 'dev' # Expiration time in sec. after which paired data xml file must be regenerated # Does not need to match KoBoCAT setting PAIRED_DATA_EXPIRATION = 5 # Minimum size (in bytes) of files to allow fast calculation of hashes # Should match KoBoCAT setting HASH_BIG_FILE_SIZE_THRESHOLD = 200 * 1024 # 200 kB # Chunk size in bytes to read per iteration when hash of a file is calculated # Should match KoBoCAT setting HASH_BIG_FILE_CHUNK = 5 * 1024 # 5 kB
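Assuming `env` here is a django-environ `Env` instance exposed by the base settings module (the record does not show where it comes from), the toolbar becomes switchable per process without editing settings; a sketch of the assumed mechanism:

# Hypothetical illustration, assuming django-environ provides `env`:
import environ

env = environ.Env()
# With DEBUG_TOOLBAR=true exported in the environment (or set in an .env
# file), env.bool("DEBUG_TOOLBAR", False) returns True, so show_toolbar()
# returns True and the toolbar renders; leave it unset to keep it hidden.
enabled = env.bool("DEBUG_TOOLBAR", False)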
b000fc19657b80c46ca9c2d7e6dfdaa16e4d400f
scripts/slave/apply_svn_patch.py
scripts/slave/apply_svn_patch.py
#!/usr/bin/python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import optparse import subprocess import sys def main(): parser = optparse.OptionParser() parser.add_option('-p', '--patch-url', help='The SVN URL to download the patch from.') parser.add_option('-r', '--root-dir', help='The root dir in which to apply patch.') options, args = parser.parse_args() if args: parser.error('Unused args: %s' % args) if not (options.patch_url and options.root_dir): parser.error('A patch URL and root directory should be specified.') svn_cat = subprocess.Popen(['svn', 'cat', options.patch_url], stdout=subprocess.PIPE) patch = subprocess.Popen(['patch', '-t', '-p', '0', '-d', options.root_dir], stdin=svn_cat.stdout) _, err = patch.communicate() return err or None if __name__ == '__main__': sys.exit(main())
Add a script which can apply a try job SVN patch via an annotated step.
Add a script which can apply a try job SVN patch via an annotated step. Review URL: https://chromiumcodereview.appspot.com/24688002 git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@225287 0039d316-1c4b-4281-b951-d872f2087c98
Python
bsd-3-clause
eunchong/build,eunchong/build,eunchong/build,eunchong/build
#!/usr/bin/python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import optparse import subprocess import sys def main(): parser = optparse.OptionParser() parser.add_option('-p', '--patch-url', help='The SVN URL to download the patch from.') parser.add_option('-r', '--root-dir', help='The root dir in which to apply patch.') options, args = parser.parse_args() if args: parser.error('Unused args: %s' % args) if not (options.patch_url and options.root_dir): parser.error('A patch URL and root directory should be specified.') svn_cat = subprocess.Popen(['svn', 'cat', options.patch_url], stdout=subprocess.PIPE) patch = subprocess.Popen(['patch', '-t', '-p', '0', '-d', options.root_dir], stdin=svn_cat.stdout) _, err = patch.communicate() return err or None if __name__ == '__main__': sys.exit(main())
Add a script which can apply a try job SVN patch via an annotated step. Review URL: https://chromiumcodereview.appspot.com/24688002 git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@225287 0039d316-1c4b-4281-b951-d872f2087c98
3681b5a485662656d6419d95ad89f1fbdb7a2a50
myuw/context_processors.py
myuw/context_processors.py
# Determines if the requesting device is a native hybrid app (android/ios)
def is_hybrid(request):
    return {
        'is_hybrid': 'HTTP_MYUW_HYBRID' in request.META
    }
# Determines if the requesting device is a native hybrid app (android/ios)
def is_hybrid(request):
    return {
        'is_hybrid': 'MyUW_Hybrid/1.0' in request.META['HTTP_USER_AGENT']
    }
Update context processor to check for custom hybrid user agent.
Update context processor to check for custom hybrid user agent.
Python
apache-2.0
uw-it-aca/myuw,uw-it-aca/myuw,uw-it-aca/myuw,uw-it-aca/myuw
# Determines if the requesting device is a native hybrid app (android/ios)
def is_hybrid(request):
    return {
        'is_hybrid': 'MyUW_Hybrid/1.0' in request.META['HTTP_USER_AGENT']
    }
Update context processor to check for custom hybrid user agent.
# Determines if the requesting device is a native hybrid app (android/ios)
def is_hybrid(request):
    return {
        'is_hybrid': 'HTTP_MYUW_HYBRID' in request.META
    }
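For the context processor above to take effect, it must be listed in the template engine settings. A hedged sketch of that wiring, with the dotted path inferred from the record's file path (myuw/context_processors.py) and the rest of the TEMPLATES block assumed:

# Hypothetical settings.py fragment registering the processor above.
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            # ... Django's defaults ...
            'myuw.context_processors.is_hybrid',
        ],
    },
}]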
1a16d598c902218a8112841219f89044724155da
smatic/templatetags/smatic_tags.py
smatic/templatetags/smatic_tags.py
import os from commands import getstatusoutput from django import template from django.conf import settings from django.utils._os import safe_join register = template.Library() def scss(file_path): """ Converts an scss file into css and returns the output """ input_path = safe_join(settings.SMATIC_SCSS_PATH, file_path) if not os.path.exists(input_path): raise Exception('File does not exist: %s\n' % input_path) sass_dict = { 'bin' : settings.SASS_BIN, 'sass_style' : 'compact', 'input' : input_path } cmd = "%(bin)s --scss -t %(sass_style)s -C %(input)s" % sass_dict (status, output) = getstatusoutput(cmd) if not status == 0: raise Exception(output) return output register.simple_tag(scss) def js(file_path): input_path = safe_join(settings.SMATIC_JS_PATH, file_path) if not os.path.exists(input_path): # TODO: check if enabled on raise Exception('File does not exist: %s\n' % input_path) return '<script type="text/javascript" src="%sjs/%s"></script>' % (settings.STATIC_URL, file_path) register.simple_tag(js)
import os
from commands import getstatusoutput

from django import template
from django.conf import settings
from django.utils._os import safe_join

register = template.Library()


@register.simple_tag
def scss(file_path):
    """
    Convert an scss file into css and return the output.
    """
    input_path = safe_join(settings.SMATIC_SCSS_PATH, file_path)
    if not os.path.exists(input_path):
        raise Exception('File does not exist: %s\n' % input_path)
    cmd = "%(bin)s --scss -t %(sass_style)s -C %(input)s" % {
        'bin': getattr(settings, 'SASS_BIN', 'sass'),
        'sass_style': 'compact',
        'input': input_path,
    }
    (status, output) = getstatusoutput(cmd)
    if not status == 0:
        raise Exception(output)
    return output


@register.simple_tag
def js(file_path):
    input_path = safe_join(settings.SMATIC_JS_PATH, file_path)
    if not os.path.exists(input_path):
        # TODO: check if enabled on
        raise Exception('File does not exist: %s\n' % input_path)
    return '<script type="text/javascript" src="%sjs/%s"></script>' % (
        settings.STATIC_URL, file_path
    )
Tidy up the code, and don't make settings.SASS_BIN a requirement (default to 'sass')
Tidy up the code, and don't make settings.SASS_BIN a requirement (default to 'sass')
Python
bsd-3-clause
lincolnloop/django-smatic
import os
from commands import getstatusoutput

from django import template
from django.conf import settings
from django.utils._os import safe_join

register = template.Library()


@register.simple_tag
def scss(file_path):
    """
    Convert an scss file into css and return the output.
    """
    input_path = safe_join(settings.SMATIC_SCSS_PATH, file_path)
    if not os.path.exists(input_path):
        raise Exception('File does not exist: %s\n' % input_path)
    cmd = "%(bin)s --scss -t %(sass_style)s -C %(input)s" % {
        'bin': getattr(settings, 'SASS_BIN', 'sass'),
        'sass_style': 'compact',
        'input': input_path,
    }
    (status, output) = getstatusoutput(cmd)
    if not status == 0:
        raise Exception(output)
    return output


@register.simple_tag
def js(file_path):
    input_path = safe_join(settings.SMATIC_JS_PATH, file_path)
    if not os.path.exists(input_path):
        # TODO: check if enabled on
        raise Exception('File does not exist: %s\n' % input_path)
    return '<script type="text/javascript" src="%sjs/%s"></script>' % (
        settings.STATIC_URL, file_path
    )
Tidy up the code, and don't make settings.SASS_BIN a requirement (default to 'sass') import os from commands import getstatusoutput from django import template from django.conf import settings from django.utils._os import safe_join register = template.Library() def scss(file_path): """ Converts an scss file into css and returns the output """ input_path = safe_join(settings.SMATIC_SCSS_PATH, file_path) if not os.path.exists(input_path): raise Exception('File does not exist: %s\n' % input_path) sass_dict = { 'bin' : settings.SASS_BIN, 'sass_style' : 'compact', 'input' : input_path } cmd = "%(bin)s --scss -t %(sass_style)s -C %(input)s" % sass_dict (status, output) = getstatusoutput(cmd) if not status == 0: raise Exception(output) return output register.simple_tag(scss) def js(file_path): input_path = safe_join(settings.SMATIC_JS_PATH, file_path) if not os.path.exists(input_path): # TODO: check if enabled on raise Exception('File does not exist: %s\n' % input_path) return '<script type="text/javascript" src="%sjs/%s"></script>' % (settings.STATIC_URL, file_path) register.simple_tag(js)
2306478f67a93e27dd9d7d397f97e3641df3516a
ipython_startup.py
ipython_startup.py
import scipy as sp import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D
from __future__ import division from __future__ import absolute_import import scipy as sp import itertools as it import functools as ft import operator as op import sys import sympy # Plotting import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.pyplot import subplots from matplotlib.pyplot import show as pltshow # and import some common functions into the global namespace from scipy.linalg import norm from scipy import sin, cos, tan, log, pi, sqrt, exp, mean from math import atan2, acos from sympy import Rational as sRat from sympy import pretty as spretty
Add lots of useful default imports to ipython
Add lots of useful default imports to ipython
Python
cc0-1.0
davidshepherd7/dotfiles,davidshepherd7/dotfiles,davidshepherd7/dotfiles,davidshepherd7/dotfiles,davidshepherd7/dotfiles
from __future__ import division from __future__ import absolute_import import scipy as sp import itertools as it import functools as ft import operator as op import sys import sympy # Plotting import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.pyplot import subplots from matplotlib.pyplot import show as pltshow # and import some common functions into the global namespace from scipy.linalg import norm from scipy import sin, cos, tan, log, pi, sqrt, exp, mean from math import atan2, acos from sympy import Rational as sRat from sympy import pretty as spretty
Add lots of useful default imports to ipython import scipy as sp import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D
5b2de46ac3c21278f1ab2c7620d3f31dc7d98530
setup.py
setup.py
#!/usr/bin/env python from setuptools import setup version = '0.6.0.dev' required = open('requirements.txt').read().split('\n') extra = {'all': ['mist', 'rime']} setup( name='thunder-python', version=version, description='large-scale image and time series analysis', author='freeman-lab', author_email='[email protected]', url='https://github.com/thunder-project/thunder', packages=[ 'thunder', 'thunder', 'thunder.blocks', 'thunder.series', 'thunder.images' ], package_data={'thunder.lib': ['thunder_python-' + version + '-py2.7.egg']}, install_requires=required, extra_requires=extra, long_description='See https://github.com/thunder-project/thunder' )
#!/usr/bin/env python from setuptools import setup version = '0.6.0.dev' required = open('requirements.txt').read().split('\n') extra = {'all': ['mist', 'rime']} setup( name='thunder-python', version=version, description='large-scale image and time series analysis', author='freeman-lab', author_email='[email protected]', url='https://github.com/thunder-project/thunder', packages=[ 'thunder', 'thunder', 'thunder.blocks', 'thunder.series', 'thunder.images' ], package_data={'thunder.lib': ['thunder_python-' + version + '-py2.7.egg']}, install_requires=required, extras_require=extra, long_description='See https://github.com/thunder-project/thunder' )
Fix name for extra arguments
Fix name for extra arguments
Python
apache-2.0
thunder-project/thunder,jwittenbach/thunder,j-friedrich/thunder,j-friedrich/thunder
#!/usr/bin/env python from setuptools import setup version = '0.6.0.dev' required = open('requirements.txt').read().split('\n') extra = {'all': ['mist', 'rime']} setup( name='thunder-python', version=version, description='large-scale image and time series analysis', author='freeman-lab', author_email='[email protected]', url='https://github.com/thunder-project/thunder', packages=[ 'thunder', 'thunder', 'thunder.blocks', 'thunder.series', 'thunder.images' ], package_data={'thunder.lib': ['thunder_python-' + version + '-py2.7.egg']}, install_requires=required, extras_require=extra, long_description='See https://github.com/thunder-project/thunder' )
Fix name for extra arguments #!/usr/bin/env python from setuptools import setup version = '0.6.0.dev' required = open('requirements.txt').read().split('\n') extra = {'all': ['mist', 'rime']} setup( name='thunder-python', version=version, description='large-scale image and time series analysis', author='freeman-lab', author_email='[email protected]', url='https://github.com/thunder-project/thunder', packages=[ 'thunder', 'thunder', 'thunder.blocks', 'thunder.series', 'thunder.images' ], package_data={'thunder.lib': ['thunder_python-' + version + '-py2.7.egg']}, install_requires=required, extra_requires=extra, long_description='See https://github.com/thunder-project/thunder' )
5fb38bfb6eae77b7024bf4d9990472f60d576826
setup.py
setup.py
import pathlib from crc import LIBRARY_VERSION from setuptools import setup current = pathlib.Path(__file__).parent.resolve() def readme(): return (current / 'README.md').read_text(encoding='utf-8') if __name__ == '__main__': setup( name='crc', version=LIBRARY_VERSION, py_modules=['crc'], classifiers=[ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], url='https://github.com/Nicoretti/crc', license='BSD', python_requires='>=3.7', author='Nicola Coretti', author_email='[email protected]', description='Library and CLI to calculate and verify all kinds of CRC checksums.', keywords=['CRC', 'CRC8', 'CRC16', 'CRC32', 'CRC64'], long_description=readme(), long_description_content_type='text/markdown', entry_points={ 'console_scripts': [ 'crc=crc:main', ], } )
import pathlib from crc import LIBRARY_VERSION from setuptools import setup current = pathlib.Path(__file__).parent.resolve() def readme(): return (current / 'README.md').read_text(encoding='utf-8') if __name__ == '__main__': setup( name='crc', version=LIBRARY_VERSION, py_modules=['crc'], classifiers=[ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], url='https://github.com/Nicoretti/crc', license='BSD', python_requires='>=3.7', author='Nicola Coretti', author_email='[email protected]', description='Library and CLI to calculate and verify all kinds of CRC checksums.', keywords=['CRC', 'CRC8', 'CRC16', 'CRC32', 'CRC64'], long_description=readme(), long_description_content_type='text/markdown', entry_points={ 'console_scripts': [ 'crc=crc:main', ], } )
Update package information about supported python versions
Update package information about supported python versions
Python
bsd-2-clause
Nicoretti/crc
import pathlib from crc import LIBRARY_VERSION from setuptools import setup current = pathlib.Path(__file__).parent.resolve() def readme(): return (current / 'README.md').read_text(encoding='utf-8') if __name__ == '__main__': setup( name='crc', version=LIBRARY_VERSION, py_modules=['crc'], classifiers=[ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], url='https://github.com/Nicoretti/crc', license='BSD', python_requires='>=3.7', author='Nicola Coretti', author_email='[email protected]', description='Library and CLI to calculate and verify all kinds of CRC checksums.', keywords=['CRC', 'CRC8', 'CRC16', 'CRC32', 'CRC64'], long_description=readme(), long_description_content_type='text/markdown', entry_points={ 'console_scripts': [ 'crc=crc:main', ], } )
Update package information about supported python versions import pathlib from crc import LIBRARY_VERSION from setuptools import setup current = pathlib.Path(__file__).parent.resolve() def readme(): return (current / 'README.md').read_text(encoding='utf-8') if __name__ == '__main__': setup( name='crc', version=LIBRARY_VERSION, py_modules=['crc'], classifiers=[ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], url='https://github.com/Nicoretti/crc', license='BSD', python_requires='>=3.7', author='Nicola Coretti', author_email='[email protected]', description='Library and CLI to calculate and verify all kinds of CRC checksums.', keywords=['CRC', 'CRC8', 'CRC16', 'CRC32', 'CRC64'], long_description=readme(), long_description_content_type='text/markdown', entry_points={ 'console_scripts': [ 'crc=crc:main', ], } )
058b484b997158219b9c0eda34ec6ac3d897f563
setup.py
setup.py
import os import json from setuptools import setup with open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding="utf-8") as f: readme = f.read() with open(os.path.join(os.path.dirname(__file__), 'package.json'), encoding="utf-8") as f: package = json.loads(f.read()) setup( name=package['name'], version=package['version'], description=package['description'], long_description=readme, long_description_content_type='text/markdown', author=package['author']['name'], author_email=package['author']['email'], url=package['homepage'], packages=['s3direct'], include_package_data=True, install_requires=['django>=1.8'], zip_safe=False, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], )
import os import io import json from setuptools import setup with io.open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding="utf-8") as f: readme = f.read() with io.open(os.path.join(os.path.dirname(__file__), 'package.json'), encoding="utf-8") as f: package = json.loads(f.read()) setup( name=package['name'], version=package['version'], description=package['description'], long_description=readme, long_description_content_type='text/markdown', author=package['author']['name'], author_email=package['author']['email'], url=package['homepage'], packages=['s3direct'], include_package_data=True, install_requires=['django>=1.8'], zip_safe=False, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], )
Use io.open for py2/py3 compat
Use io.open for py2/py3 compat
Python
mit
bradleyg/django-s3direct,bradleyg/django-s3direct,bradleyg/django-s3direct
import os import io import json from setuptools import setup with io.open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding="utf-8") as f: readme = f.read() with io.open(os.path.join(os.path.dirname(__file__), 'package.json'), encoding="utf-8") as f: package = json.loads(f.read()) setup( name=package['name'], version=package['version'], description=package['description'], long_description=readme, long_description_content_type='text/markdown', author=package['author']['name'], author_email=package['author']['email'], url=package['homepage'], packages=['s3direct'], include_package_data=True, install_requires=['django>=1.8'], zip_safe=False, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], )
Use io.open for py2/py3 compat import os import json from setuptools import setup with open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding="utf-8") as f: readme = f.read() with open(os.path.join(os.path.dirname(__file__), 'package.json'), encoding="utf-8") as f: package = json.loads(f.read()) setup( name=package['name'], version=package['version'], description=package['description'], long_description=readme, long_description_content_type='text/markdown', author=package['author']['name'], author_email=package['author']['email'], url=package['homepage'], packages=['s3direct'], include_package_data=True, install_requires=['django>=1.8'], zip_safe=False, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], )
1869b79d49419799cecf1f5e19eb0aa3987e215b
tests/test_vector2_scalar_multiplication.py
tests/test_vector2_scalar_multiplication.py
import pytest # type: ignore from ppb_vector import Vector2 @pytest.mark.parametrize("x, y, expected", [ (Vector2(6, 1), 0, Vector2(0, 0)), (Vector2(6, 1), 2, Vector2(12, 2)), (Vector2(0, 0), 3, Vector2(0, 0)), (Vector2(-1.5, 2.4), -2, Vector2(3.0, -4.8)), (Vector2(1, 2), 0.1, Vector2(0.1, 0.2)) ]) def test_scalar_multiplication(x, y, expected): assert x * y == expected
import pytest # type: ignore from hypothesis import given from hypothesis.strategies import floats from utils import vectors from ppb_vector import Vector2 @pytest.mark.parametrize("x, y, expected", [ (Vector2(6, 1), 0, Vector2(0, 0)), (Vector2(6, 1), 2, Vector2(12, 2)), (Vector2(0, 0), 3, Vector2(0, 0)), (Vector2(-1.5, 2.4), -2, Vector2(3.0, -4.8)), (Vector2(1, 2), 0.1, Vector2(0.1, 0.2)) ]) def test_scalar_multiplication(x, y, expected): assert x * y == expected @given( x=floats(min_value=-1e75, max_value=1e75), y=floats(min_value=-1e75, max_value=1e75), v=vectors(max_magnitude=1e150) ) def test_scalar_associative(x: float, y: float, v: Vector2): left = (x * y) * v right = x * (y * v) assert left.isclose(right)
Add a test of the associativity of scalar multiplication
Add a test of the associativity of scalar multiplication
Python
artistic-2.0
ppb/ppb-vector,ppb/ppb-vector
import pytest  # type: ignore
from hypothesis import given
from hypothesis.strategies import floats

from utils import vectors
from ppb_vector import Vector2


@pytest.mark.parametrize("x, y, expected", [
    (Vector2(6, 1), 0, Vector2(0, 0)),
    (Vector2(6, 1), 2, Vector2(12, 2)),
    (Vector2(0, 0), 3, Vector2(0, 0)),
    (Vector2(-1.5, 2.4), -2, Vector2(3.0, -4.8)),
    (Vector2(1, 2), 0.1, Vector2(0.1, 0.2))
])
def test_scalar_multiplication(x, y, expected):
    assert x * y == expected


@given(
    x=floats(min_value=-1e75, max_value=1e75),
    y=floats(min_value=-1e75, max_value=1e75),
    v=vectors(max_magnitude=1e150)
)
def test_scalar_associative(x: float, y: float, v: Vector2):
    left = (x * y) * v
    right = x * (y * v)

    assert left.isclose(right)

Add a test of the associativity of scalar multiplication

import pytest  # type: ignore

from ppb_vector import Vector2


@pytest.mark.parametrize("x, y, expected", [
    (Vector2(6, 1), 0, Vector2(0, 0)),
    (Vector2(6, 1), 2, Vector2(12, 2)),
    (Vector2(0, 0), 3, Vector2(0, 0)),
    (Vector2(-1.5, 2.4), -2, Vector2(3.0, -4.8)),
    (Vector2(1, 2), 0.1, Vector2(0.1, 0.2))
])
def test_scalar_multiplication(x, y, expected):
    assert x * y == expected
5b0f7412f88400e61a05e694d4883389d812f3d2
tests/runtests.py
tests/runtests.py
#!/usr/bin/env python
import os
import sys
from unittest import defaultTestLoader, TextTestRunner, TestSuite

TESTS = ('form', 'fields', 'validators', 'widgets', 'webob_wrapper', 'translations', 'ext_csrf', 'ext_i18n')


def make_suite(prefix='', extra=()):
    tests = TESTS + extra
    test_names = list(prefix + x for x in tests)
    suite = TestSuite()
    suite.addTest(defaultTestLoader.loadTestsFromNames(test_names))
    return suite


def additional_tests():
    """
    This is called automatically by setup.py test
    """
    return make_suite('tests.')


def main():
    extra_tests = tuple(x for x in sys.argv[1:] if '-' not in x)
    suite = make_suite('', )

    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

    runner = TextTestRunner(verbosity=(sys.argv.count('-v') - sys.argv.count('-q') + 1))
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())


if __name__ == '__main__':
    main()

#!/usr/bin/env python
import os
import sys
from unittest import defaultTestLoader, TextTestRunner, TestSuite

TESTS = ('form', 'fields', 'validators', 'widgets', 'webob_wrapper', 'translations', 'ext_csrf', 'ext_i18n')


def make_suite(prefix='', extra=()):
    tests = TESTS + extra
    test_names = list(prefix + x for x in tests)
    suite = TestSuite()
    suite.addTest(defaultTestLoader.loadTestsFromNames(test_names))
    return suite


def additional_tests():
    """
    This is called automatically by setup.py test
    """
    return make_suite('tests.')


def main():
    extra_tests = tuple(x for x in sys.argv[1:] if '-' not in x)
    suite = make_suite('', extra_tests)

    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

    runner = TextTestRunner(verbosity=(sys.argv.count('-v') - sys.argv.count('-q') + 1))
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())


if __name__ == '__main__':
    main()
Add back in running of extra tests
Add back in running of extra tests
Python
bsd-3-clause
maxcountryman/wtforms
#!/usr/bin/env python
import os
import sys
from unittest import defaultTestLoader, TextTestRunner, TestSuite

TESTS = ('form', 'fields', 'validators', 'widgets', 'webob_wrapper', 'translations', 'ext_csrf', 'ext_i18n')


def make_suite(prefix='', extra=()):
    tests = TESTS + extra
    test_names = list(prefix + x for x in tests)
    suite = TestSuite()
    suite.addTest(defaultTestLoader.loadTestsFromNames(test_names))
    return suite


def additional_tests():
    """
    This is called automatically by setup.py test
    """
    return make_suite('tests.')


def main():
    extra_tests = tuple(x for x in sys.argv[1:] if '-' not in x)
    suite = make_suite('', extra_tests)

    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

    runner = TextTestRunner(verbosity=(sys.argv.count('-v') - sys.argv.count('-q') + 1))
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())


if __name__ == '__main__':
    main()

Add back in running of extra tests

#!/usr/bin/env python
import os
import sys
from unittest import defaultTestLoader, TextTestRunner, TestSuite

TESTS = ('form', 'fields', 'validators', 'widgets', 'webob_wrapper', 'translations', 'ext_csrf', 'ext_i18n')


def make_suite(prefix='', extra=()):
    tests = TESTS + extra
    test_names = list(prefix + x for x in tests)
    suite = TestSuite()
    suite.addTest(defaultTestLoader.loadTestsFromNames(test_names))
    return suite


def additional_tests():
    """
    This is called automatically by setup.py test
    """
    return make_suite('tests.')


def main():
    extra_tests = tuple(x for x in sys.argv[1:] if '-' not in x)
    suite = make_suite('', )

    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

    runner = TextTestRunner(verbosity=(sys.argv.count('-v') - sys.argv.count('-q') + 1))
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())


if __name__ == '__main__':
    main()
15c7cc3cf1599efa65896e7138f3015e68ae5998
setup.py
setup.py
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages

requires = ['six']

if sys.version_info[0] == 2:
    requires += ['python-dateutil>=1.0, <2.0, >=2.1']
else:
    # Py3k
    requires += ['python-dateutil>=2.0']

setup(
    name='freezegun',
    version='0.2.2',
    description='Let your Python tests travel through time',
    author='Steve Pulec',
    author_email='spulec@gmail',
    url='https://github.com/spulec/freezegun',
    packages=find_packages(exclude=("tests", "tests.*",)),
    install_requires=requires,
    include_package_data=True,
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
)

#!/usr/bin/env python
import sys
from setuptools import setup, find_packages

requires = ['six']

if sys.version_info[0] == 2:
    requires += ['python-dateutil>=1.0, != 2.0']
else:
    # Py3k
    requires += ['python-dateutil>=2.0']

setup(
    name='freezegun',
    version='0.2.2',
    description='Let your Python tests travel through time',
    author='Steve Pulec',
    author_email='spulec@gmail',
    url='https://github.com/spulec/freezegun',
    packages=find_packages(exclude=("tests", "tests.*",)),
    install_requires=requires,
    include_package_data=True,
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
)
Use a saner requirements for python-dateutil
Use a saner requirements for python-dateutil

The requirement >=1.0, <2.0, >=2.1 doesn't make a lot of logical sense and it will break in the future. There is no version that is >= 1.0, and < 2.0, and >= 2.1 becasue these versions are mutually exclusive. Even if you interpret the , as OR it still doesn't make sense because this includes every version. What this spec is actually trying to represent is any version >= 1.0 but not 2.0, so instead we'll just say that.
Python
apache-2.0
spulec/freezegun,Affirm/freezegun,adamchainz/freezegun,Sun77789/freezegun
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages

requires = ['six']

if sys.version_info[0] == 2:
    requires += ['python-dateutil>=1.0, != 2.0']
else:
    # Py3k
    requires += ['python-dateutil>=2.0']

setup(
    name='freezegun',
    version='0.2.2',
    description='Let your Python tests travel through time',
    author='Steve Pulec',
    author_email='spulec@gmail',
    url='https://github.com/spulec/freezegun',
    packages=find_packages(exclude=("tests", "tests.*",)),
    install_requires=requires,
    include_package_data=True,
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
)

Use a saner requirements for python-dateutil

The requirement >=1.0, <2.0, >=2.1 doesn't make a lot of logical sense and it will break in the future. There is no version that is >= 1.0, and < 2.0, and >= 2.1 becasue these versions are mutually exclusive. Even if you interpret the , as OR it still doesn't make sense because this includes every version. What this spec is actually trying to represent is any version >= 1.0 but not 2.0, so instead we'll just say that.

#!/usr/bin/env python
import sys
from setuptools import setup, find_packages

requires = ['six']

if sys.version_info[0] == 2:
    requires += ['python-dateutil>=1.0, <2.0, >=2.1']
else:
    # Py3k
    requires += ['python-dateutil>=2.0']

setup(
    name='freezegun',
    version='0.2.2',
    description='Let your Python tests travel through time',
    author='Steve Pulec',
    author_email='spulec@gmail',
    url='https://github.com/spulec/freezegun',
    packages=find_packages(exclude=("tests", "tests.*",)),
    install_requires=requires,
    include_package_data=True,
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
)
0ef968528f31da5dd09f016134b4a1ffa6377f84
scripts/slave/chromium/package_source.py
scripts/slave/chromium/package_source.py
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A tool to package a checkout's source and upload it to Google Storage."""

import sys


if '__main__' == __name__:
  sys.exit(0)

#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A tool to package a checkout's source and upload it to Google Storage."""

import os
import sys

from common import chromium_utils
from slave import slave_utils

FILENAME = 'chromium-src.tgz'
GSBASE = 'chromium-browser-csindex'


def main(argv):
  if not os.path.exists('src'):
    raise Exception('ERROR: no src directory to package, exiting')

  chromium_utils.RunCommand(['rm', '-f', FILENAME])
  if os.path.exists(FILENAME):
    raise Exception('ERROR: %s cannot be removed, exiting' % FILENAME)

  if chromium_utils.RunCommand(['tar', 'czf', FILENAME, 'src/']) != 0:
    raise Exception('ERROR: failed to create %s, exiting' % FILENAME)

  status = slave_utils.GSUtilCopyFile(FILENAME, GSBASE)
  if status != 0:
    raise Exception('ERROR: GSUtilCopyFile error %d. "%s" -> "%s"' % (
        status, FILENAME, GSBASE))

  return 0


if '__main__' == __name__:
  sys.exit(main(None))
Create source snapshot and upload to GS.
Create source snapshot and upload to GS.

BUG=79198
Review URL: http://codereview.chromium.org/7129020

git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@88372 0039d316-1c4b-4281-b951-d872f2087c98
Python
bsd-3-clause
eunchong/build,eunchong/build,eunchong/build,eunchong/build
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A tool to package a checkout's source and upload it to Google Storage."""

import os
import sys

from common import chromium_utils
from slave import slave_utils

FILENAME = 'chromium-src.tgz'
GSBASE = 'chromium-browser-csindex'


def main(argv):
  if not os.path.exists('src'):
    raise Exception('ERROR: no src directory to package, exiting')

  chromium_utils.RunCommand(['rm', '-f', FILENAME])
  if os.path.exists(FILENAME):
    raise Exception('ERROR: %s cannot be removed, exiting' % FILENAME)

  if chromium_utils.RunCommand(['tar', 'czf', FILENAME, 'src/']) != 0:
    raise Exception('ERROR: failed to create %s, exiting' % FILENAME)

  status = slave_utils.GSUtilCopyFile(FILENAME, GSBASE)
  if status != 0:
    raise Exception('ERROR: GSUtilCopyFile error %d. "%s" -> "%s"' % (
        status, FILENAME, GSBASE))

  return 0


if '__main__' == __name__:
  sys.exit(main(None))

Create source snapshot and upload to GS.

BUG=79198
Review URL: http://codereview.chromium.org/7129020

git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@88372 0039d316-1c4b-4281-b951-d872f2087c98

#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A tool to package a checkout's source and upload it to Google Storage."""

import sys


if '__main__' == __name__:
  sys.exit(0)
a42a6a54f732ca7eba700b867a3025739ad6a271
list_all_users_in_group.py
list_all_users_in_group.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import grp
import pwd
import inspect
import argparse


def list_all_users_in_group(groupname):
    """Get list of all users of group.

    Get sorted list of all users of group GROUP, including users with main group GROUP.
    Origin in https://github.com/vazhnov/list_all_users_in_group
    """
    try:
        group = grp.getgrnam(groupname)
        # On error "KeyError: 'getgrnam(): name not found: GROUP'"
    except KeyError:
        return None
    group_all_users_set = set(group.gr_mem)
    for user in pwd.getpwall():
        if user.pw_gid == group.gr_gid:
            group_all_users_set.add(user.pw_name)
    return sorted(group_all_users_set)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=inspect.getdoc(list_all_users_in_group),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', '--delimiter', default='\n',
                        help='Use DELIMITER instead of newline for users delimiter')
    parser.add_argument('groupname', help='Group name')
    args = parser.parse_args()
    result = list_all_users_in_group(args.groupname)
    if result:
        print (args.delimiter.join(result))

#! /usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import grp
import pwd
import inspect
import argparse


def list_all_users_in_group(groupname):
    """Get list of all users of group.

    Get sorted list of all users of group GROUP, including users with main group GROUP.
    Origin in https://github.com/vazhnov/list_all_users_in_group
    """
    try:
        group = grp.getgrnam(groupname)
        # On error "KeyError: 'getgrnam(): name not found: GROUP'"
    except KeyError:
        return None
    group_all_users_set = set(group.gr_mem)
    for user in pwd.getpwall():
        if user.pw_gid == group.gr_gid:
            group_all_users_set.add(user.pw_name)
    return sorted(group_all_users_set)


def main():
    parser = argparse.ArgumentParser(description=inspect.getdoc(list_all_users_in_group),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', '--delimiter', default='\n',
                        help='Use DELIMITER instead of newline for users delimiter')
    parser.add_argument('groupname', help='Group name')
    args = parser.parse_args()
    result = list_all_users_in_group(args.groupname)
    if result:
        print (args.delimiter.join(result))


if __name__ == "__main__":
    main()
Move main code to function because of pylint warning 'Invalid constant name'
Move main code to function because of pylint warning 'Invalid constant name'
Python
cc0-1.0
vazhnov/list_all_users_in_group
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import grp
import pwd
import inspect
import argparse


def list_all_users_in_group(groupname):
    """Get list of all users of group.

    Get sorted list of all users of group GROUP, including users with main group GROUP.
    Origin in https://github.com/vazhnov/list_all_users_in_group
    """
    try:
        group = grp.getgrnam(groupname)
        # On error "KeyError: 'getgrnam(): name not found: GROUP'"
    except KeyError:
        return None
    group_all_users_set = set(group.gr_mem)
    for user in pwd.getpwall():
        if user.pw_gid == group.gr_gid:
            group_all_users_set.add(user.pw_name)
    return sorted(group_all_users_set)


def main():
    parser = argparse.ArgumentParser(description=inspect.getdoc(list_all_users_in_group),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', '--delimiter', default='\n',
                        help='Use DELIMITER instead of newline for users delimiter')
    parser.add_argument('groupname', help='Group name')
    args = parser.parse_args()
    result = list_all_users_in_group(args.groupname)
    if result:
        print (args.delimiter.join(result))


if __name__ == "__main__":
    main()

Move main code to function because of pylint warning 'Invalid constant name'

#! /usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import grp
import pwd
import inspect
import argparse


def list_all_users_in_group(groupname):
    """Get list of all users of group.

    Get sorted list of all users of group GROUP, including users with main group GROUP.
    Origin in https://github.com/vazhnov/list_all_users_in_group
    """
    try:
        group = grp.getgrnam(groupname)
        # On error "KeyError: 'getgrnam(): name not found: GROUP'"
    except KeyError:
        return None
    group_all_users_set = set(group.gr_mem)
    for user in pwd.getpwall():
        if user.pw_gid == group.gr_gid:
            group_all_users_set.add(user.pw_name)
    return sorted(group_all_users_set)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=inspect.getdoc(list_all_users_in_group),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', '--delimiter', default='\n',
                        help='Use DELIMITER instead of newline for users delimiter')
    parser.add_argument('groupname', help='Group name')
    args = parser.parse_args()
    result = list_all_users_in_group(args.groupname)
    if result:
        print (args.delimiter.join(result))
bb7741ade270458564ea7546d372e39bbbe0f97d
rds/delete_db_instance.py
rds/delete_db_instance.py
#!/usr/bin/env python
# a script to delete an rds instance

# import the sys and boto3 libraries
import sys
import boto3

# create an rds client
rds = boto3.client('rds')

# use the first argument to the script as the name
# of the instance to be deleted
db = sys.argv[1]

try:
    # delete the instance and catch the response
    response = rds.delete_db_instance(
        DBInstanceIdentifier=db,
        SkipFinalSnapshot=True)

    # print the response if there are no exceptions
    print response

# if there is an exception, print the error message
except Exception as error:
    print error

#!/usr/bin/env python
# a script to delete an rds instance

# import the sys and boto3 libraries
import sys
import boto3

# use the first argument to the script as the name
# of the instance to be deleted
db = sys.argv[1]

# create an rds client
rds = boto3.client('rds')

try:
    # delete the instance and catch the response
    response = rds.delete_db_instance(
        DBInstanceIdentifier=db,
        SkipFinalSnapshot=True)

    # print the response if there are no exceptions
    print response

# if there is an exception, print the error message
except Exception as error:
    print error
Swap db and rds set up
Swap db and rds set up
Python
mit
managedkaos/AWS-Python-Boto3
#!/usr/bin/env python
# a script to delete an rds instance

# import the sys and boto3 libraries
import sys
import boto3

# use the first argument to the script as the name
# of the instance to be deleted
db = sys.argv[1]

# create an rds client
rds = boto3.client('rds')

try:
    # delete the instance and catch the response
    response = rds.delete_db_instance(
        DBInstanceIdentifier=db,
        SkipFinalSnapshot=True)

    # print the response if there are no exceptions
    print response

# if there is an exception, print the error message
except Exception as error:
    print error

Swap db and rds set up

#!/usr/bin/env python
# a script to delete an rds instance

# import the sys and boto3 libraries
import sys
import boto3

# create an rds client
rds = boto3.client('rds')

# use the first argument to the script as the name
# of the instance to be deleted
db = sys.argv[1]

try:
    # delete the instance and catch the response
    response = rds.delete_db_instance(
        DBInstanceIdentifier=db,
        SkipFinalSnapshot=True)

    # print the response if there are no exceptions
    print response

# if there is an exception, print the error message
except Exception as error:
    print error
1de254b56eba45ecdc88d26272ab1f123e734e25
tests/test_dem.py
tests/test_dem.py
import unittest
import numpy as np


class CalculationMethodsTestCase(unittest.TestCase):

    def setUp(self):
        self.dem = DEMGrid()

    def test_calculate_slope(self):
        sx, sy = self.dem._calculate_slope()

    def test_calculate_laplacian(self):
        del2z = self.dem._calculate_lapalacian()

    def test_calculate_directional_laplacian(self):
        alpha = np.pi/4
        del2z = self.dem._calculate_lapalacian(alpha)

    def test_pad_boundary(self):
        dx = 5
        dy = 5
        grid = self.dem._griddata

        pad_x = np.zeros((self.ny, np.round(dx/2))
        pad_y = np.zeros((self.nx + 2*np.round(dx/2), np.round(dy/2)))
        padgrid = np.vstack([pad_y, np.hstack([pad_x, self.dem._griddata, pad_x]), pad_y]])
        self.dem._pad_boundary(dx, dy)

        assertEqual(self.dem.grid, padgrid, 'Grid padded incorrectly')

import unittest
import numpy as np
import filecmp

TESTDATA_FILENAME = os.path.join(os.path.dirname(__file__), 'data/big_basin.tif')


class CalculationMethodsTestCase(unittest.TestCase):

    def setUp(self):
        self.dem = DEMGrid(TESTDATA_FILENAME)

    def test_calculate_slope(self):
        sx, sy = self.dem._calculate_slope()

    def test_calculate_laplacian(self):
        del2z = self.dem._calculate_lapalacian()

    def test_calculate_directional_laplacian(self):
        alpha = np.pi/4
        del2z = self.dem._calculate_lapalacian(alpha)

    def test_pad_boundary(self):
        dx = 5
        dy = 5
        grid = self.dem._griddata

        pad_x = np.zeros((self.ny, np.round(dx/2))
        pad_y = np.zeros((self.nx + 2*np.round(dx/2), np.round(dy/2)))
        padgrid = np.vstack([pad_y, np.hstack([pad_x, self.dem._griddata, pad_x]), pad_y]])
        self.dem._pad_boundary(dx, dy)

        assertEqual(self.dem.grid, padgrid, 'Grid padded incorrectly')


class BaseSpatialGridTestCase(unittest.TestCase):

    def setUp(self):
        self.dem = BaseSpatialGrid(TESTDATA_FILENAME)

    def test_save(self):
        os.remove('test.tif')
        self.save('test.tif')

        this_file = os.path.join(os.path.dirname(__file__), 'test.tif')
        test_file = TESTDATA_FILENAME

        self.assertTrue(filecmp.cmp(this_file, test_file, shallow=False), 'GeoTIFF saved incorrectly')
Add test for writing spatial grid to file
Add test for writing spatial grid to file
Python
mit
stgl/scarplet,rmsare/scarplet
import unittest
import numpy as np
import filecmp

TESTDATA_FILENAME = os.path.join(os.path.dirname(__file__), 'data/big_basin.tif')


class CalculationMethodsTestCase(unittest.TestCase):

    def setUp(self):
        self.dem = DEMGrid(TESTDATA_FILENAME)

    def test_calculate_slope(self):
        sx, sy = self.dem._calculate_slope()

    def test_calculate_laplacian(self):
        del2z = self.dem._calculate_lapalacian()

    def test_calculate_directional_laplacian(self):
        alpha = np.pi/4
        del2z = self.dem._calculate_lapalacian(alpha)

    def test_pad_boundary(self):
        dx = 5
        dy = 5
        grid = self.dem._griddata

        pad_x = np.zeros((self.ny, np.round(dx/2))
        pad_y = np.zeros((self.nx + 2*np.round(dx/2), np.round(dy/2)))
        padgrid = np.vstack([pad_y, np.hstack([pad_x, self.dem._griddata, pad_x]), pad_y]])
        self.dem._pad_boundary(dx, dy)

        assertEqual(self.dem.grid, padgrid, 'Grid padded incorrectly')


class BaseSpatialGridTestCase(unittest.TestCase):

    def setUp(self):
        self.dem = BaseSpatialGrid(TESTDATA_FILENAME)

    def test_save(self):
        os.remove('test.tif')
        self.save('test.tif')

        this_file = os.path.join(os.path.dirname(__file__), 'test.tif')
        test_file = TESTDATA_FILENAME

        self.assertTrue(filecmp.cmp(this_file, test_file, shallow=False), 'GeoTIFF saved incorrectly')

Add test for writing spatial grid to file

import unittest
import numpy as np


class CalculationMethodsTestCase(unittest.TestCase):

    def setUp(self):
        self.dem = DEMGrid()

    def test_calculate_slope(self):
        sx, sy = self.dem._calculate_slope()

    def test_calculate_laplacian(self):
        del2z = self.dem._calculate_lapalacian()

    def test_calculate_directional_laplacian(self):
        alpha = np.pi/4
        del2z = self.dem._calculate_lapalacian(alpha)

    def test_pad_boundary(self):
        dx = 5
        dy = 5
        grid = self.dem._griddata

        pad_x = np.zeros((self.ny, np.round(dx/2))
        pad_y = np.zeros((self.nx + 2*np.round(dx/2), np.round(dy/2)))
        padgrid = np.vstack([pad_y, np.hstack([pad_x, self.dem._griddata, pad_x]), pad_y]])
        self.dem._pad_boundary(dx, dy)

        assertEqual(self.dem.grid, padgrid, 'Grid padded incorrectly')
bfd34a7aaf903c823d41068173c09bc5b1a251bc
test/sasdataloader/test/utest_sesans.py
test/sasdataloader/test/utest_sesans.py
""" Unit tests for the SESANS .ses reader """ import unittest from sas.sascalc.dataloader.loader import Loader import os.path class sesans_reader(unittest.TestCase): def setUp(self): self.loader = Loader() def test_sesans_load(self): """ Test .SES file loading """ f =self.loader.load("sphere3micron.ses") # self.assertEqual(f, 5) self.assertEqual(len(f.x), 40) self.assertEqual(f.x[0], 391.56) self.assertEqual(f.x[-1], 46099) self.assertEqual(f.y[-1], -0.19956) self.assertEqual(f.x_unit, "A") self.assertEqual(f.y_unit, "A-2 cm-1") self.assertEqual(f.sample.name, "Polystyrene 2 um in 53% H2O, 47% D2O") self.assertEqual(f.sample.thickness, 0.2) self.assertEqual(f.sample.zacceptance, (0.0168, "radians")) if __name__ == "__main__": unittest.main()
""" Unit tests for the SESANS .ses reader """ import unittest from sas.sascalc.dataloader.loader import Loader import os.path class sesans_reader(unittest.TestCase): def setUp(self): self.loader = Loader() def test_sesans_load(self): """ Test .SES file loading """ f =self.loader.load("sphere3micron.ses") # self.assertEqual(f, 5) self.assertEqual(len(f.x), 40) self.assertEqual(f.x[0], 391.56) self.assertEqual(f.x[-1], 46099) self.assertEqual(f.y[-1], -0.19956) self.assertEqual(f.x_unit, "A") self.assertEqual(f.y_unit, "A-2 cm-1") self.assertEqual(f.sample.name, "Polystyrene 2 um in 53% H2O, 47% D2O") self.assertEqual(f.sample.thickness, 0.2) self.assertEqual(f.sample.zacceptance, (0.0168, "radians")) self.assertEqual(f.isSesans, True) if __name__ == "__main__": unittest.main()
Test that .SES files are tagged as Sesans
Test that .SES files are tagged as Sesans
Python
bsd-3-clause
lewisodriscoll/sasview,lewisodriscoll/sasview,SasView/sasview,lewisodriscoll/sasview,SasView/sasview,SasView/sasview,SasView/sasview,lewisodriscoll/sasview,SasView/sasview,SasView/sasview,lewisodriscoll/sasview
""" Unit tests for the SESANS .ses reader """ import unittest from sas.sascalc.dataloader.loader import Loader import os.path class sesans_reader(unittest.TestCase): def setUp(self): self.loader = Loader() def test_sesans_load(self): """ Test .SES file loading """ f =self.loader.load("sphere3micron.ses") # self.assertEqual(f, 5) self.assertEqual(len(f.x), 40) self.assertEqual(f.x[0], 391.56) self.assertEqual(f.x[-1], 46099) self.assertEqual(f.y[-1], -0.19956) self.assertEqual(f.x_unit, "A") self.assertEqual(f.y_unit, "A-2 cm-1") self.assertEqual(f.sample.name, "Polystyrene 2 um in 53% H2O, 47% D2O") self.assertEqual(f.sample.thickness, 0.2) self.assertEqual(f.sample.zacceptance, (0.0168, "radians")) self.assertEqual(f.isSesans, True) if __name__ == "__main__": unittest.main()
Test that .SES files are tagged as Sesans """ Unit tests for the SESANS .ses reader """ import unittest from sas.sascalc.dataloader.loader import Loader import os.path class sesans_reader(unittest.TestCase): def setUp(self): self.loader = Loader() def test_sesans_load(self): """ Test .SES file loading """ f =self.loader.load("sphere3micron.ses") # self.assertEqual(f, 5) self.assertEqual(len(f.x), 40) self.assertEqual(f.x[0], 391.56) self.assertEqual(f.x[-1], 46099) self.assertEqual(f.y[-1], -0.19956) self.assertEqual(f.x_unit, "A") self.assertEqual(f.y_unit, "A-2 cm-1") self.assertEqual(f.sample.name, "Polystyrene 2 um in 53% H2O, 47% D2O") self.assertEqual(f.sample.thickness, 0.2) self.assertEqual(f.sample.zacceptance, (0.0168, "radians")) if __name__ == "__main__": unittest.main()
590de85fdb151e11079796c68f300d4fe7559995
setup.py
setup.py
import subprocess
import sys

from setuptools import Command, setup


class RunTests(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        errno = subprocess.call([sys.executable, '-m', 'unittest', 'gis_metadata.tests.tests'])
        raise SystemExit(errno)


with open('README.md') as readme:
    long_description = readme.read()


setup(
    name='gis_metadata_parser',
    description='Parser for GIS metadata standards including FGDC and ISO-19115',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='arcgis,fgdc,iso,ISO-19115,ISO-19139,gis,metadata,parser,xml,gis_metadata,gis_metadata_parser',
    version='1.2.0',
    packages=['gis_metadata', 'gis_metadata.tests'],
    install_requires=['frozendict>=1.2', 'parserutils>=1.1', 'six>=1.9.0'],
    tests_require=['mock'],
    url='https://github.com/consbio/gis-metadata-parser',
    license='BSD',
    cmdclass={'test': RunTests}
)

import subprocess
import sys

from setuptools import Command, setup


class RunTests(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        errno = subprocess.call([sys.executable, '-m', 'unittest', 'gis_metadata.tests.tests'])
        raise SystemExit(errno)


with open('README.md') as readme:
    long_description = readme.read()


setup(
    name='gis_metadata_parser',
    description='Parser for GIS metadata standards including FGDC and ISO-19115',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='arcgis,fgdc,iso,ISO-19115,ISO-19139,gis,metadata,parser,xml,gis_metadata,gis_metadata_parser',
    version='1.2.1',
    packages=['gis_metadata', 'gis_metadata.tests'],
    install_requires=['frozendict>=1.2', 'parserutils>=1.1', 'six>=1.9.0'],
    tests_require=['mock'],
    url='https://github.com/consbio/gis-metadata-parser',
    license='BSD',
    cmdclass={'test': RunTests}
)
Increment version for mock test fix
Increment version for mock test fix
Python
bsd-3-clause
consbio/gis-metadata-parser
import subprocess
import sys

from setuptools import Command, setup


class RunTests(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        errno = subprocess.call([sys.executable, '-m', 'unittest', 'gis_metadata.tests.tests'])
        raise SystemExit(errno)


with open('README.md') as readme:
    long_description = readme.read()


setup(
    name='gis_metadata_parser',
    description='Parser for GIS metadata standards including FGDC and ISO-19115',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='arcgis,fgdc,iso,ISO-19115,ISO-19139,gis,metadata,parser,xml,gis_metadata,gis_metadata_parser',
    version='1.2.1',
    packages=['gis_metadata', 'gis_metadata.tests'],
    install_requires=['frozendict>=1.2', 'parserutils>=1.1', 'six>=1.9.0'],
    tests_require=['mock'],
    url='https://github.com/consbio/gis-metadata-parser',
    license='BSD',
    cmdclass={'test': RunTests}
)

Increment version for mock test fix

import subprocess
import sys

from setuptools import Command, setup


class RunTests(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        errno = subprocess.call([sys.executable, '-m', 'unittest', 'gis_metadata.tests.tests'])
        raise SystemExit(errno)


with open('README.md') as readme:
    long_description = readme.read()


setup(
    name='gis_metadata_parser',
    description='Parser for GIS metadata standards including FGDC and ISO-19115',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='arcgis,fgdc,iso,ISO-19115,ISO-19139,gis,metadata,parser,xml,gis_metadata,gis_metadata_parser',
    version='1.2.0',
    packages=['gis_metadata', 'gis_metadata.tests'],
    install_requires=['frozendict>=1.2', 'parserutils>=1.1', 'six>=1.9.0'],
    tests_require=['mock'],
    url='https://github.com/consbio/gis-metadata-parser',
    license='BSD',
    cmdclass={'test': RunTests}
)
effd1010abb7dbe920e11627fe555bacecced194
rst2pdf/utils.py
rst2pdf/utils.py
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$

import sys
from reportlab.platypus import PageBreak, Spacer
from flowables import *
import shlex
from log import log

def parseRaw (data):
    '''Parse and process a simple DSL to handle creation of flowables.

    Supported (can add others on request):

    * PageBreak

    * Spacer width, height

    '''
    elements=[]
    lines=data.splitlines()
    for line in lines:
        lexer=shlex.shlex(line)
        lexer.whitespace+=','
        tokens=list(lexer)
        command=tokens[0]
        if command == 'PageBreak':
            if len(tokens)==1:
                elements.append(MyPageBreak())
            else:
                elements.append(MyPageBreak(tokens[1]))
        if command == 'Spacer':
            elements.append(Spacer(int(tokens[1]),int(tokens[2])))
        if command == 'Transition':
            elements.append(Transition(*tokens[1:]))
    return elements

# Looks like this is not used anywhere now
#def depth (node):
#    if node.parent==None:
#        return 0
#    else:
#        return 1+depth(node.parent)

# -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$

import sys
from reportlab.platypus import PageBreak, Spacer
from flowables import *
import shlex
from log import log

def parseRaw (data):
    '''Parse and process a simple DSL to handle creation of flowables.

    Supported (can add others on request):

    * PageBreak

    * Spacer width, height

    '''
    elements=[]
    lines=data.splitlines()
    for line in lines:
        lexer=shlex.shlex(line)
        lexer.whitespace+=','
        tokens=list(lexer)
        command=tokens[0]
        if command == 'PageBreak':
            if len(tokens)==1:
                elements.append(MyPageBreak())
            else:
                elements.append(MyPageBreak(tokens[1]))
        if command == 'Spacer':
            elements.append(Spacer(int(tokens[1]),int(tokens[2])))
        if command == 'Transition':
            elements.append(Transition(*tokens[1:]))
    return elements

# Looks like this is not used anywhere now
#def depth (node):
#    if node.parent==None:
#        return 0
#    else:
#        return 1+depth(node.parent)
Fix encoding (thanks to Yasushi Masuda)
Fix encoding (thanks to Yasushi Masuda)

git-svn-id: 305ad3fa995f01f9ce4b4f46c2a806ba00a97020@433 3777fadb-0f44-0410-9e7f-9d8fa6171d72
Python
mit
aquavitae/rst2pdf-py3-dev,tonioo/rst2pdf,sychen/rst2pdf,tonioo/rst2pdf,aquavitae/rst2pdf,sychen/rst2pdf,openpolis/rst2pdf-patched-docutils-0.8,aquavitae/rst2pdf-py3-dev,aquavitae/rst2pdf,openpolis/rst2pdf-patched-docutils-0.8
# -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$

import sys
from reportlab.platypus import PageBreak, Spacer
from flowables import *
import shlex
from log import log

def parseRaw (data):
    '''Parse and process a simple DSL to handle creation of flowables.

    Supported (can add others on request):

    * PageBreak

    * Spacer width, height

    '''
    elements=[]
    lines=data.splitlines()
    for line in lines:
        lexer=shlex.shlex(line)
        lexer.whitespace+=','
        tokens=list(lexer)
        command=tokens[0]
        if command == 'PageBreak':
            if len(tokens)==1:
                elements.append(MyPageBreak())
            else:
                elements.append(MyPageBreak(tokens[1]))
        if command == 'Spacer':
            elements.append(Spacer(int(tokens[1]),int(tokens[2])))
        if command == 'Transition':
            elements.append(Transition(*tokens[1:]))
    return elements

# Looks like this is not used anywhere now
#def depth (node):
#    if node.parent==None:
#        return 0
#    else:
#        return 1+depth(node.parent)

Fix encoding (thanks to Yasushi Masuda)

git-svn-id: 305ad3fa995f01f9ce4b4f46c2a806ba00a97020@433 3777fadb-0f44-0410-9e7f-9d8fa6171d72

#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$

import sys
from reportlab.platypus import PageBreak, Spacer
from flowables import *
import shlex
from log import log

def parseRaw (data):
    '''Parse and process a simple DSL to handle creation of flowables.

    Supported (can add others on request):

    * PageBreak

    * Spacer width, height

    '''
    elements=[]
    lines=data.splitlines()
    for line in lines:
        lexer=shlex.shlex(line)
        lexer.whitespace+=','
        tokens=list(lexer)
        command=tokens[0]
        if command == 'PageBreak':
            if len(tokens)==1:
                elements.append(MyPageBreak())
            else:
                elements.append(MyPageBreak(tokens[1]))
        if command == 'Spacer':
            elements.append(Spacer(int(tokens[1]),int(tokens[2])))
        if command == 'Transition':
            elements.append(Transition(*tokens[1:]))
    return elements

# Looks like this is not used anywhere now
#def depth (node):
#    if node.parent==None:
#        return 0
#    else:
#        return 1+depth(node.parent)
6775a5c58bd85dce644330dfd509d8f23135c5fe
setup.py
setup.py
#!/usr/bin/env python
from setuptools import setup, find_packages


with open('README.rst') as readme_file:
    README = readme_file.read()


install_requires = [
    'click==6.6',
    'botocore>=1.5.40,<2.0.0',
    'typing==3.5.3.0',
    'six>=1.10.0,<2.0.0',
    'pip>=9,<10'
]


setup(
    name='chalice',
    version='0.10.1',
    description="Microframework",
    long_description=README,
    author="James Saryerwinnie",
    author_email='[email protected]',
    url='https://github.com/jamesls/chalice',
    packages=find_packages(exclude=['tests']),
    install_requires=install_requires,
    license="Apache License 2.0",
    package_data={'chalice': ['*.json']},
    include_package_data=True,
    zip_safe=False,
    keywords='chalice',
    entry_points={
        'console_scripts': [
            'chalice = chalice.cli:main',
        ]
    },
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
    ],
)

#!/usr/bin/env python
from setuptools import setup, find_packages


with open('README.rst') as readme_file:
    README = readme_file.read()


install_requires = [
    'click==6.6',
    'botocore>=1.5.40,<2.0.0',
    'typing==3.5.3.0',
    'six>=1.10.0,<2.0.0',
    'pip>=9,<10'
]


setup(
    name='chalice',
    version='0.10.1',
    description="Microframework",
    long_description=README,
    author="James Saryerwinnie",
    author_email='[email protected]',
    url='https://github.com/jamesls/chalice',
    packages=find_packages(exclude=['tests']),
    install_requires=install_requires,
    license="Apache License 2.0",
    package_data={'chalice': ['*.json']},
    include_package_data=True,
    zip_safe=False,
    keywords='chalice',
    entry_points={
        'console_scripts': [
            'chalice = chalice.cli:main',
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
    ],
)
Change dev status to beta, not pre-alpha
Change dev status to beta, not pre-alpha

There's no RC classifier, so beta looks like the closest one we can use.
Python
apache-2.0
awslabs/chalice
#!/usr/bin/env python
from setuptools import setup, find_packages


with open('README.rst') as readme_file:
    README = readme_file.read()


install_requires = [
    'click==6.6',
    'botocore>=1.5.40,<2.0.0',
    'typing==3.5.3.0',
    'six>=1.10.0,<2.0.0',
    'pip>=9,<10'
]


setup(
    name='chalice',
    version='0.10.1',
    description="Microframework",
    long_description=README,
    author="James Saryerwinnie",
    author_email='[email protected]',
    url='https://github.com/jamesls/chalice',
    packages=find_packages(exclude=['tests']),
    install_requires=install_requires,
    license="Apache License 2.0",
    package_data={'chalice': ['*.json']},
    include_package_data=True,
    zip_safe=False,
    keywords='chalice',
    entry_points={
        'console_scripts': [
            'chalice = chalice.cli:main',
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
    ],
)

Change dev status to beta, not pre-alpha

There's no RC classifier, so beta looks like the closest one we can use.

#!/usr/bin/env python
from setuptools import setup, find_packages


with open('README.rst') as readme_file:
    README = readme_file.read()


install_requires = [
    'click==6.6',
    'botocore>=1.5.40,<2.0.0',
    'typing==3.5.3.0',
    'six>=1.10.0,<2.0.0',
    'pip>=9,<10'
]


setup(
    name='chalice',
    version='0.10.1',
    description="Microframework",
    long_description=README,
    author="James Saryerwinnie",
    author_email='[email protected]',
    url='https://github.com/jamesls/chalice',
    packages=find_packages(exclude=['tests']),
    install_requires=install_requires,
    license="Apache License 2.0",
    package_data={'chalice': ['*.json']},
    include_package_data=True,
    zip_safe=False,
    keywords='chalice',
    entry_points={
        'console_scripts': [
            'chalice = chalice.cli:main',
        ]
    },
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
    ],
)
d48ae791364a0d29d60636adfde1f143858794cd
api/identifiers/serializers.py
api/identifiers/serializers.py
from rest_framework import serializers as ser

from api.base.utils import absolute_reverse
from api.base.serializers import JSONAPISerializer, RelationshipField, IDField, LinksField


class IdentifierSerializer(JSONAPISerializer):

    category = ser.CharField(read_only=True)

    filterable_fields = frozenset(['category'])

    value = ser.CharField(read_only=True)

    referent = RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<referent._id>'},
    )

    id = IDField(source='_id', read_only=True)

    links = LinksField({'self': 'self_url'})

    class Meta:
        type_ = 'identifiers'

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url

    def get_id(self, obj):
        return obj._id

    def get_detail_url(self, obj):
        import ipdb; ipdb.set_trace()
        return '{}/identifiers/{}'.format(obj.absolute_api_v2_url, obj._id)

    def self_url(self, obj):
        return absolute_reverse('identifiers:identifier-detail', kwargs={
            'identifier_id': obj._id,
        })

from rest_framework import serializers as ser

from api.base.utils import absolute_reverse
from api.base.serializers import JSONAPISerializer, RelationshipField, IDField, LinksField


class IdentifierSerializer(JSONAPISerializer):

    category = ser.CharField(read_only=True)

    filterable_fields = frozenset(['category'])

    value = ser.CharField(read_only=True)

    referent = RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<referent._id>'},
    )

    id = IDField(source='_id', read_only=True)

    links = LinksField({'self': 'self_url'})

    class Meta:
        type_ = 'identifiers'

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url

    def get_id(self, obj):
        return obj._id

    def get_detail_url(self, obj):
        return '{}/identifiers/{}'.format(obj.absolute_api_v2_url, obj._id)

    def self_url(self, obj):
        return absolute_reverse('identifiers:identifier-detail', kwargs={
            'identifier_id': obj._id,
        })
Remove rogue debugger how embarassing
Remove rogue debugger how embarassing
Python
apache-2.0
rdhyee/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,caneruguz/osf.io,acshi/osf.io,abought/osf.io,amyshi188/osf.io,erinspace/osf.io,DanielSBrown/osf.io,chrisseto/osf.io,leb2dg/osf.io,mattclark/osf.io,samchrisinger/osf.io,alexschiller/osf.io,mluke93/osf.io,monikagrabowska/osf.io,mfraezz/osf.io,DanielSBrown/osf.io,crcresearch/osf.io,rdhyee/osf.io,caneruguz/osf.io,laurenrevere/osf.io,amyshi188/osf.io,acshi/osf.io,saradbowman/osf.io,samchrisinger/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,aaxelb/osf.io,mluo613/osf.io,HalcyonChimera/osf.io,zamattiac/osf.io,mfraezz/osf.io,baylee-d/osf.io,DanielSBrown/osf.io,caneruguz/osf.io,samchrisinger/osf.io,laurenrevere/osf.io,mattclark/osf.io,SSJohns/osf.io,acshi/osf.io,mluke93/osf.io,cslzchen/osf.io,wearpants/osf.io,hmoco/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,SSJohns/osf.io,wearpants/osf.io,abought/osf.io,felliott/osf.io,wearpants/osf.io,brianjgeiger/osf.io,SSJohns/osf.io,jnayak1/osf.io,TomBaxter/osf.io,abought/osf.io,pattisdr/osf.io,aaxelb/osf.io,Nesiehr/osf.io,jnayak1/osf.io,crcresearch/osf.io,alexschiller/osf.io,baylee-d/osf.io,baylee-d/osf.io,chennan47/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,emetsger/osf.io,samchrisinger/osf.io,adlius/osf.io,monikagrabowska/osf.io,zamattiac/osf.io,chrisseto/osf.io,jnayak1/osf.io,binoculars/osf.io,erinspace/osf.io,adlius/osf.io,cwisecarver/osf.io,emetsger/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,kwierman/osf.io,wearpants/osf.io,mluo613/osf.io,leb2dg/osf.io,zamattiac/osf.io,adlius/osf.io,cslzchen/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,erinspace/osf.io,monikagrabowska/osf.io,SSJohns/osf.io,chennan47/osf.io,rdhyee/osf.io,CenterForOpenScience/osf.io,chennan47/osf.io,TomBaxter/osf.io,cwisecarver/osf.io,pattisdr/osf.io,TomBaxter/osf.io,sloria/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,amyshi188/osf.io,emetsger/osf.io,binoculars/osf.io,Nesiehr/osf.io,zamattiac/osf.io,kwierman/osf.io,mluo613/osf.io,icereval/osf.io,hmoco/osf.io,saradbowman/osf.io,caseyrollins/osf.io,mfraezz/osf.io,caneruguz/osf.io,felliott/osf.io,mluo613/osf.io,rdhyee/osf.io,emetsger/osf.io,Nesiehr/osf.io,alexschiller/osf.io,cslzchen/osf.io,hmoco/osf.io,Nesiehr/osf.io,kwierman/osf.io,chrisseto/osf.io,jnayak1/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,felliott/osf.io,acshi/osf.io,sloria/osf.io,amyshi188/osf.io,mfraezz/osf.io,adlius/osf.io,HalcyonChimera/osf.io,felliott/osf.io,laurenrevere/osf.io,mluo613/osf.io,caseyrollins/osf.io,aaxelb/osf.io,kwierman/osf.io,monikagrabowska/osf.io,mluke93/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,icereval/osf.io,mattclark/osf.io,cslzchen/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,abought/osf.io,icereval/osf.io,pattisdr/osf.io,DanielSBrown/osf.io,chrisseto/osf.io,brianjgeiger/osf.io,acshi/osf.io,cwisecarver/osf.io,cwisecarver/osf.io,mluke93/osf.io,Johnetordoff/osf.io
from rest_framework import serializers as ser

from api.base.utils import absolute_reverse
from api.base.serializers import JSONAPISerializer, RelationshipField, IDField, LinksField


class IdentifierSerializer(JSONAPISerializer):

    category = ser.CharField(read_only=True)

    filterable_fields = frozenset(['category'])

    value = ser.CharField(read_only=True)

    referent = RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<referent._id>'},
    )

    id = IDField(source='_id', read_only=True)

    links = LinksField({'self': 'self_url'})

    class Meta:
        type_ = 'identifiers'

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url

    def get_id(self, obj):
        return obj._id

    def get_detail_url(self, obj):
        return '{}/identifiers/{}'.format(obj.absolute_api_v2_url, obj._id)

    def self_url(self, obj):
        return absolute_reverse('identifiers:identifier-detail', kwargs={
            'identifier_id': obj._id,
        })

Remove rogue debugger how embarassing

from rest_framework import serializers as ser

from api.base.utils import absolute_reverse
from api.base.serializers import JSONAPISerializer, RelationshipField, IDField, LinksField


class IdentifierSerializer(JSONAPISerializer):

    category = ser.CharField(read_only=True)

    filterable_fields = frozenset(['category'])

    value = ser.CharField(read_only=True)

    referent = RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<referent._id>'},
    )

    id = IDField(source='_id', read_only=True)

    links = LinksField({'self': 'self_url'})

    class Meta:
        type_ = 'identifiers'

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url

    def get_id(self, obj):
        return obj._id

    def get_detail_url(self, obj):
        import ipdb; ipdb.set_trace()
        return '{}/identifiers/{}'.format(obj.absolute_api_v2_url, obj._id)

    def self_url(self, obj):
        return absolute_reverse('identifiers:identifier-detail', kwargs={
            'identifier_id': obj._id,
        })
c94f7e5f2c838c3fdd007229175da680de256b04
tests/configurations/nginx/tests_file_size_limit.py
tests/configurations/nginx/tests_file_size_limit.py
#! coding: utf-8
import subprocess

import nose.tools as nt

from tests import TestPortalAndino


class TestFileSizeLimit(TestPortalAndino.TestPortalAndino):

    @classmethod
    def setUpClass(cls):
        super(TestFileSizeLimit, cls).setUpClass()

    def test_nginx_configuration_uses_1024_MB_as_file_size_limit(self):
        size_line = subprocess.check_output('docker exec -it andino-nginx cat /etc/nginx/conf.d/default.conf | '
                                            'grep client_max_body_size', shell=True).strip()
        print subprocess.check_output('docker exec -it andino-nginx cat /etc/nginx/conf.d/default.conf',
                                      shell=True).strip()
        nt.assert_true('1024' in size_line)

#! coding: utf-8
import subprocess

import nose.tools as nt

from tests import TestPortalAndino


class TestFileSizeLimit(TestPortalAndino.TestPortalAndino):

    @classmethod
    def setUpClass(cls):
        super(TestFileSizeLimit, cls).setUpClass()

    def test_nginx_configuration_uses_1024_MB_as_file_size_limit(self):
        size_line = subprocess.check_output(
            'docker exec -it andino-nginx cat /etc/nginx/conf.d/default.conf | grep client_max_body_size',
            shell=True)
        nt.assert_true('1024' in size_line)
Revert "Hago un strip del output de subprocess"
Revert "Hago un strip del output de subprocess" This reverts commit f5f21d78d87be641617a7cb920d0869975175e58.
Python
mit
datosgobar/portal-andino,datosgobar/portal-andino
#! coding: utf-8
import subprocess

import nose.tools as nt

from tests import TestPortalAndino


class TestFileSizeLimit(TestPortalAndino.TestPortalAndino):

    @classmethod
    def setUpClass(cls):
        super(TestFileSizeLimit, cls).setUpClass()

    def test_nginx_configuration_uses_1024_MB_as_file_size_limit(self):
        size_line = subprocess.check_output(
            'docker exec -it andino-nginx cat /etc/nginx/conf.d/default.conf | grep client_max_body_size',
            shell=True)
        nt.assert_true('1024' in size_line)

Revert "Hago un strip del output de subprocess"

This reverts commit f5f21d78d87be641617a7cb920d0869975175e58.

#! coding: utf-8
import subprocess

import nose.tools as nt

from tests import TestPortalAndino


class TestFileSizeLimit(TestPortalAndino.TestPortalAndino):

    @classmethod
    def setUpClass(cls):
        super(TestFileSizeLimit, cls).setUpClass()

    def test_nginx_configuration_uses_1024_MB_as_file_size_limit(self):
        size_line = subprocess.check_output('docker exec -it andino-nginx cat /etc/nginx/conf.d/default.conf | '
                                            'grep client_max_body_size', shell=True).strip()
        print subprocess.check_output('docker exec -it andino-nginx cat /etc/nginx/conf.d/default.conf',
                                      shell=True).strip()
        nt.assert_true('1024' in size_line)
fe2bd7cf8b0139e1c7c1037d89929dd7c4093458
setup.py
setup.py
import os
from setuptools import setup, find_packages
import glob

VERSION = "0.6.3"

src_dir = os.path.dirname(__file__)

install_requires = [
    "troposphere>=1.2.2",
    "boto3>=1.3.1",
    "botocore>=1.4.38",
    "PyYAML>=3.11",
    "awacs>=0.5.3",
    "colorama==0.3.7",
]

tests_require = [
    "nose>=1.0",
    "mock==1.0.1",
    "stacker_blueprints",
    "moto",
    "testfixtures",
]


def read(filename):
    full_path = os.path.join(src_dir, filename)
    with open(full_path) as fd:
        return fd.read()


if __name__ == "__main__":
    setup(
        name="stacker",
        version=VERSION,
        author="Michael Barrett",
        author_email="[email protected]",
        license="New BSD license",
        url="https://github.com/remind101/stacker",
        description="Opinionated AWS CloudFormation Stack manager",
        long_description=read("README.rst"),
        packages=find_packages(),
        scripts=glob.glob(os.path.join(src_dir, "scripts", "*")),
        install_requires=install_requires,
        tests_require=tests_require,
        test_suite="nose.collector",
    )

import os
from setuptools import setup, find_packages
import glob

VERSION = "0.6.3"

src_dir = os.path.dirname(__file__)

install_requires = [
    "troposphere>=1.8.0",
    "boto3>=1.3.1",
    "botocore>=1.4.38",
    "PyYAML>=3.11",
    "awacs>=0.6.0",
    "colorama==0.3.7",
]

tests_require = [
    "nose>=1.0",
    "mock==1.0.1",
    "stacker_blueprints",
    "moto",
    "testfixtures",
]


def read(filename):
    full_path = os.path.join(src_dir, filename)
    with open(full_path) as fd:
        return fd.read()


if __name__ == "__main__":
    setup(
        name="stacker",
        version=VERSION,
        author="Michael Barrett",
        author_email="[email protected]",
        license="New BSD license",
        url="https://github.com/remind101/stacker",
        description="Opinionated AWS CloudFormation Stack manager",
        long_description=read("README.rst"),
        packages=find_packages(),
        scripts=glob.glob(os.path.join(src_dir, "scripts", "*")),
        install_requires=install_requires,
        tests_require=tests_require,
        test_suite="nose.collector",
    )
Update troposphere & awacs to latest releases
Update troposphere & awacs to latest releases
Python
bsd-2-clause
mhahn/stacker,mhahn/stacker,remind101/stacker,remind101/stacker
import os
from setuptools import setup, find_packages
import glob

VERSION = "0.6.3"

src_dir = os.path.dirname(__file__)

install_requires = [
    "troposphere>=1.8.0",
    "boto3>=1.3.1",
    "botocore>=1.4.38",
    "PyYAML>=3.11",
    "awacs>=0.6.0",
    "colorama==0.3.7",
]

tests_require = [
    "nose>=1.0",
    "mock==1.0.1",
    "stacker_blueprints",
    "moto",
    "testfixtures",
]


def read(filename):
    full_path = os.path.join(src_dir, filename)
    with open(full_path) as fd:
        return fd.read()


if __name__ == "__main__":
    setup(
        name="stacker",
        version=VERSION,
        author="Michael Barrett",
        author_email="[email protected]",
        license="New BSD license",
        url="https://github.com/remind101/stacker",
        description="Opinionated AWS CloudFormation Stack manager",
        long_description=read("README.rst"),
        packages=find_packages(),
        scripts=glob.glob(os.path.join(src_dir, "scripts", "*")),
        install_requires=install_requires,
        tests_require=tests_require,
        test_suite="nose.collector",
    )

Update troposphere & awacs to latest releases

import os
from setuptools import setup, find_packages
import glob

VERSION = "0.6.3"

src_dir = os.path.dirname(__file__)

install_requires = [
    "troposphere>=1.2.2",
    "boto3>=1.3.1",
    "botocore>=1.4.38",
    "PyYAML>=3.11",
    "awacs>=0.5.3",
    "colorama==0.3.7",
]

tests_require = [
    "nose>=1.0",
    "mock==1.0.1",
    "stacker_blueprints",
    "moto",
    "testfixtures",
]


def read(filename):
    full_path = os.path.join(src_dir, filename)
    with open(full_path) as fd:
        return fd.read()


if __name__ == "__main__":
    setup(
        name="stacker",
        version=VERSION,
        author="Michael Barrett",
        author_email="[email protected]",
        license="New BSD license",
        url="https://github.com/remind101/stacker",
        description="Opinionated AWS CloudFormation Stack manager",
        long_description=read("README.rst"),
        packages=find_packages(),
        scripts=glob.glob(os.path.join(src_dir, "scripts", "*")),
        install_requires=install_requires,
        tests_require=tests_require,
        test_suite="nose.collector",
    )
e66468faaf9c4885f13545329baa20fe4914f49c
historia.py
historia.py
from eve import Eve
from eve_swagger import swagger
from eve.auth import BasicAuth

from config import *
from hashlib import md5


class MyBasicAuth(BasicAuth):
    def check_auth(self, username, password, allowed_roles, resource, method):
        accounts = app.data.driver.db['accounts']
        account = accounts.find_one({'username': username})
        return account and password == account['password']


def set_reporter(request, lookup):
    print request


app = Eve(auth=MyBasicAuth)
app.on_pre_PUT_event += set_reporter
app.register_blueprint(swagger)

app.config['SWAGGER_INFO'] = SWAGGER_INFO
app.config['SWAGGER_HOST'] = SWAGGER_HOST

if __name__ == '__main__':
    app.run(host=LISTEN_IP, port=LISTEN_PORT)

from eve import Eve
from eve_swagger import swagger
from eve.auth import BasicAuth

from config import *
from hashlib import md5


class MyBasicAuth(BasicAuth):
    def check_auth(self, username, password, allowed_roles, resource, method):
        accounts = app.data.driver.db['accounts']
        account = accounts.find_one({'username': username})
        return account and md5(password).hexdigest() == account['password']


def set_reporter(request, lookup):
    print request


app = Eve(auth=MyBasicAuth)
app.on_pre_PUT_event += set_reporter
app.register_blueprint(swagger)

app.config['SWAGGER_INFO'] = SWAGGER_INFO
app.config['SWAGGER_HOST'] = SWAGGER_HOST

if __name__ == '__main__':
    app.run(host=LISTEN_IP, port=LISTEN_PORT)
Use MD5 to encode passwords
Use MD5 to encode passwords
Python
mit
waoliveros/historia
from eve import Eve
from eve_swagger import swagger
from eve.auth import BasicAuth

from config import *
from hashlib import md5


class MyBasicAuth(BasicAuth):
    def check_auth(self, username, password, allowed_roles, resource, method):
        accounts = app.data.driver.db['accounts']
        account = accounts.find_one({'username': username})
        return account and md5(password).hexdigest() == account['password']


def set_reporter(request, lookup):
    print request


app = Eve(auth=MyBasicAuth)
app.on_pre_PUT_event += set_reporter
app.register_blueprint(swagger)

app.config['SWAGGER_INFO'] = SWAGGER_INFO
app.config['SWAGGER_HOST'] = SWAGGER_HOST

if __name__ == '__main__':
    app.run(host=LISTEN_IP, port=LISTEN_PORT)

Use MD5 to encode passwords

from eve import Eve
from eve_swagger import swagger
from eve.auth import BasicAuth

from config import *
from hashlib import md5


class MyBasicAuth(BasicAuth):
    def check_auth(self, username, password, allowed_roles, resource, method):
        accounts = app.data.driver.db['accounts']
        account = accounts.find_one({'username': username})
        return account and password == account['password']


def set_reporter(request, lookup):
    print request


app = Eve(auth=MyBasicAuth)
app.on_pre_PUT_event += set_reporter
app.register_blueprint(swagger)

app.config['SWAGGER_INFO'] = SWAGGER_INFO
app.config['SWAGGER_HOST'] = SWAGGER_HOST

if __name__ == '__main__':
    app.run(host=LISTEN_IP, port=LISTEN_PORT)
6bdcda14c8bd5b66bc6fcb4bb6a520e326211f74
poll/models.py
poll/models.py
from django.db import models class QuestionGroup(models.Model): heading = models.TextField() text = models.TextField(blank=True) date_added = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now_add=True) class Question(models.Model): text = models.TextField() question_group = models.ForeignKey(QuestionGroup) date_added = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now_add=True) class Choice(models.Model): text = models.TextField() question = models.ForeignKey(Question) date_added = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now_add=True) class Session(models.Model): name = models.TextField(blank=True) ip = models.CharField(max_length=200) date_added = models.DateTimeField(auto_now=True) date_submitted = models.DateTimeField(null=True) class Response(models.Model): choice = models.ForeignKey(Choice) session = models.ForeignKey(Session) value = models.IntegerField() date_added = models.DateTimeField(auto_now=True) date_modified = models.DateTimeField(auto_now_add=True)
from django.db import models


class QuestionGroup(models.Model):
    heading = models.TextField()
    text = models.TextField(blank=True)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.id) + ". " + self.heading


class Question(models.Model):
    text = models.TextField()
    question_group = models.ForeignKey(QuestionGroup)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.id) + ". " + self.text


class Choice(models.Model):
    text = models.TextField()
    question = models.ForeignKey(Question)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.id) + ". " + self.text


class Session(models.Model):
    name = models.TextField(blank=True)
    ip = models.CharField(max_length=200)
    date_added = models.DateTimeField(auto_now=True)
    date_submitted = models.DateTimeField(null=True)


class Response(models.Model):
    choice = models.ForeignKey(Choice)
    session = models.ForeignKey(Session)
    value = models.IntegerField()
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)
Implement __str__ for proper printing in admin
Implement __str__ for proper printing in admin
Python
mit
gabriel-v/psi,gabriel-v/psi,gabriel-v/psi
from django.db import models


class QuestionGroup(models.Model):
    heading = models.TextField()
    text = models.TextField(blank=True)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.id) + ". " + self.heading


class Question(models.Model):
    text = models.TextField()
    question_group = models.ForeignKey(QuestionGroup)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.id) + ". " + self.text


class Choice(models.Model):
    text = models.TextField()
    question = models.ForeignKey(Question)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.id) + ". " + self.text


class Session(models.Model):
    name = models.TextField(blank=True)
    ip = models.CharField(max_length=200)
    date_added = models.DateTimeField(auto_now=True)
    date_submitted = models.DateTimeField(null=True)


class Response(models.Model):
    choice = models.ForeignKey(Choice)
    session = models.ForeignKey(Session)
    value = models.IntegerField()
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)
Implement __str__ for proper printing in admin

from django.db import models


class QuestionGroup(models.Model):
    heading = models.TextField()
    text = models.TextField(blank=True)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)


class Question(models.Model):
    text = models.TextField()
    question_group = models.ForeignKey(QuestionGroup)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)


class Choice(models.Model):
    text = models.TextField()
    question = models.ForeignKey(Question)
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)


class Session(models.Model):
    name = models.TextField(blank=True)
    ip = models.CharField(max_length=200)
    date_added = models.DateTimeField(auto_now=True)
    date_submitted = models.DateTimeField(null=True)


class Response(models.Model):
    choice = models.ForeignKey(Choice)
    session = models.ForeignKey(Session)
    value = models.IntegerField()
    date_added = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now_add=True)
5cfbf23ff88a2d028fdd852adc735263c060f4eb
inet/inet.py
inet/inet.py
# -*- coding: utf-8 -*-
import csv
import os

from collections import namedtuple


class Inet():
    """Inet class"""

    def __init__(self, data_file=None):
        # Naive check for file type based on extension
        # First check filepath is passed as a parameter
        if data_file is not None:
            # Then split off the extension using os
            ext = os.path.splitext(data_file)[-1].lower()
            # then check ends with .csv or .json
            if ext == '.csv':
                self.rows = []
                with open(data_file) as f:
                    f_csv = csv.reader(f)
                    headings = next(f_csv)
                    Row = namedtuple('Row', headings)
                    for r in f_csv:
                        row = Row(*r)
                        self.rows.append(row)
            else:
                raise TypeError("Input file must be of type .csv")
        else:
            raise AttributeError("No data_file path specified as a "
                                 "parameter to Inet object")
Add read data file functionality to Inet class init
Add read data file functionality to Inet class init
Python
mit
nestauk/inet
# -*- coding: utf-8 -*-
import csv
import os

from collections import namedtuple


class Inet():
    """Inet class"""

    def __init__(self, data_file=None):
        # Naive check for file type based on extension
        # First check filepath is passed as a parameter
        if data_file is not None:
            # Then split off the extension using os
            ext = os.path.splitext(data_file)[-1].lower()
            # then check ends with .csv or .json
            if ext == '.csv':
                self.rows = []
                with open(data_file) as f:
                    f_csv = csv.reader(f)
                    headings = next(f_csv)
                    Row = namedtuple('Row', headings)
                    for r in f_csv:
                        row = Row(*r)
                        self.rows.append(row)
            else:
                raise TypeError("Input file must be of type .csv")
        else:
            raise AttributeError("No data_file path specified as a "
                                 "parameter to Inet object")
Add read data file functionality to Inet class init
20a801255ab505641e1ec0d449a4b36411c673bc
indra/tests/test_tas.py
indra/tests/test_tas.py
from nose.plugins.attrib import attr
from indra.sources.tas import process_from_web


@attr('slow')
def test_processor():
    tp = process_from_web(affinity_class_limit=10)
    assert tp
    assert tp.statements
    num_stmts = len(tp.statements)
    # This is the total number of statements about human genes
    assert num_stmts == 1601159, num_stmts
    assert all(len(s.evidence) == 1 for s in tp.statements), \
        "Some statements lack evidence, or have extra evidence."
from nose.plugins.attrib import attr
from indra.sources.tas import process_from_web


@attr('slow')
def test_processor():
    tp = process_from_web(affinity_class_limit=10)
    assert tp
    assert tp.statements
    num_stmts = len(tp.statements)
    # This is the total number of statements about human genes
    assert num_stmts == 1175682, num_stmts
    assert all(len(s.evidence) >= 1 for s in tp.statements), \
        'Some statements lack any evidence'
Update test for current evidence aggregation
Update test for current evidence aggregation
Python
bsd-2-clause
sorgerlab/indra,sorgerlab/indra,sorgerlab/indra,sorgerlab/belpy,sorgerlab/belpy,bgyori/indra,bgyori/indra,johnbachman/belpy,johnbachman/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/belpy,johnbachman/indra,johnbachman/indra,bgyori/indra
from nose.plugins.attrib import attr
from indra.sources.tas import process_from_web


@attr('slow')
def test_processor():
    tp = process_from_web(affinity_class_limit=10)
    assert tp
    assert tp.statements
    num_stmts = len(tp.statements)
    # This is the total number of statements about human genes
    assert num_stmts == 1175682, num_stmts
    assert all(len(s.evidence) >= 1 for s in tp.statements), \
        'Some statements lack any evidence'
Update test for current evidence aggregation

from nose.plugins.attrib import attr
from indra.sources.tas import process_from_web


@attr('slow')
def test_processor():
    tp = process_from_web(affinity_class_limit=10)
    assert tp
    assert tp.statements
    num_stmts = len(tp.statements)
    # This is the total number of statements about human genes
    assert num_stmts == 1601159, num_stmts
    assert all(len(s.evidence) == 1 for s in tp.statements), \
        "Some statements lack evidence, or have extra evidence."
480852bb1dd6796b7fb12e40edc924b9a4dbee60
test/test_misc.py
test/test_misc.py
import unittest

from .helpers import run_module


class MiscTests(unittest.TestCase):
    def setUp(self):
        self.name = "benchmarker"

    def test_no_framework(self):
        with self.assertRaises(Exception):
            run_module(self.name)

    def test_no_problem(self):
        with self.assertRaises(Exception):
            run_module(self.name, "--framework=pytorch")
Add tests to cover no framework, no problem
Add tests to cover no framework, no problem
Python
mpl-2.0
undertherain/benchmarker,undertherain/benchmarker,undertherain/benchmarker,undertherain/benchmarker
import unittest

from .helpers import run_module


class MiscTests(unittest.TestCase):
    def setUp(self):
        self.name = "benchmarker"

    def test_no_framework(self):
        with self.assertRaises(Exception):
            run_module(self.name)

    def test_no_problem(self):
        with self.assertRaises(Exception):
            run_module(self.name, "--framework=pytorch")
Add tests to cover no framework, no problem
a684564eace2185b40acf3413c8f75587195ff46
unitary/examples/tictactoe/ascii_board.py
unitary/examples/tictactoe/ascii_board.py
# Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unitary.examples.tictactoe.enums import TicTacSquare, TicTacResult
from unitary.examples.tictactoe.tic_tac_toe import TicTacToe


def _flip_turn(turn: TicTacSquare):
    return TicTacSquare.O if turn == TicTacSquare.X else TicTacSquare.X


class AsciiBoard:
    def __init__(self):
        self.board = TicTacToe()

    def play(self):
        turn = TicTacSquare.X
        result = TicTacResult.UNFINISHED
        while result == TicTacResult.UNFINISHED:
            print(self.board.print())
            move = input(f"{turn.name} turn to move: ")
            result = self.board.move(move, turn)
            turn = _flip_turn(turn)
        print(f"Result: {result.name}")


if __name__ == "__main__":
    AsciiBoard().play()
Add ASCII board for Quantum TicTacToe board
Add ASCII board for Quantum TicTacToe board

- Add preliminary ASCII board for Quantum TicTacToe
- Displays probability for blank (.) and X and O.
Python
apache-2.0
quantumlib/unitary,quantumlib/unitary
# Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unitary.examples.tictactoe.enums import TicTacSquare, TicTacResult
from unitary.examples.tictactoe.tic_tac_toe import TicTacToe


def _flip_turn(turn: TicTacSquare):
    return TicTacSquare.O if turn == TicTacSquare.X else TicTacSquare.X


class AsciiBoard:
    def __init__(self):
        self.board = TicTacToe()

    def play(self):
        turn = TicTacSquare.X
        result = TicTacResult.UNFINISHED
        while result == TicTacResult.UNFINISHED:
            print(self.board.print())
            move = input(f"{turn.name} turn to move: ")
            result = self.board.move(move, turn)
            turn = _flip_turn(turn)
        print(f"Result: {result.name}")


if __name__ == "__main__":
    AsciiBoard().play()
Add ASCII board for Quantum TicTacToe board

- Add preliminary ASCII board for Quantum TicTacToe
- Displays probability for blank (.) and X and O.
2bdadadbfc50aa1a99752705f96358a1076e1951
openstack/tests/functional/telemetry/v2/test_resource.py
openstack/tests/functional/telemetry/v2/test_resource.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack.tests.functional import base


class TestResource(base.BaseFunctionalTest):

    def test_list(self):
        ids = [o.resource_id for o in self.conn.telemetry.resources()]
        self.assertNotEqual(0, len(ids))
Add functional tests for telemetry resource
Add functional tests for telemetry resource

Change-Id: I8192452971a0f04fbd6a040c3c048f9284d58bb3
Python
apache-2.0
stackforge/python-openstacksdk,mtougeron/python-openstacksdk,briancurtin/python-openstacksdk,openstack/python-openstacksdk,briancurtin/python-openstacksdk,mtougeron/python-openstacksdk,openstack/python-openstacksdk,dudymas/python-openstacksdk,dudymas/python-openstacksdk,stackforge/python-openstacksdk,dtroyer/python-openstacksdk,dtroyer/python-openstacksdk
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack.tests.functional import base


class TestResource(base.BaseFunctionalTest):

    def test_list(self):
        ids = [o.resource_id for o in self.conn.telemetry.resources()]
        self.assertNotEqual(0, len(ids))
Add functional tests for telemetry resource

Change-Id: I8192452971a0f04fbd6a040c3c048f9284d58bb3
caf9795cf0f775442bd0c3e06cd550a6e8d0206b
virtool/labels/db.py
virtool/labels/db.py
async def count_samples(db, label_id):
    return await db.samples.count_documents({"labels": {"$in": [label_id]}})
async def attach_sample_count(db, document, label_id):
    document.update({"count": await db.samples.count_documents({"labels": {"$in": [label_id]}})})
Rewrite function for sample count
Rewrite function for sample count
Python
mit
virtool/virtool,igboyes/virtool,virtool/virtool,igboyes/virtool
async def attach_sample_count(db, document, label_id):
    document.update({"count": await db.samples.count_documents({"labels": {"$in": [label_id]}})})
Rewrite function for sample count

async def count_samples(db, label_id):
    return await db.samples.count_documents({"labels": {"$in": [label_id]}})
2fb27cf8f4399ec6aba36b86d2993e6c3b81d0ee
coalib/bearlib/languages/__init__.py
coalib/bearlib/languages/__init__.py
""" This directory holds means to get generic information for specific languages. """ # Start ignoring PyUnusedCodeBear from .Language import Language from .Language import Languages from .definitions.Unknown import Unknown from .definitions.C import C from .definitions.CPP import CPP from .definitions.CSharp import CSharp from .definitions.CSS import CSS from .definitions.Java import Java from .definitions.JavaScript import JavaScript from .definitions.Python import Python from .definitions.Vala import Vala from .definitions.html import HTML # Stop ignoring PyUnusedCodeBear
""" This directory holds means to get generic information for specific languages. """ # Start ignoring PyUnusedCodeBear from .Language import Language from .Language import Languages from .definitions.Unknown import Unknown from .definitions.C import C from .definitions.CPP import CPP from .definitions.CSharp import CSharp from .definitions.CSS import CSS from .definitions.Fortran import Fortran from .definitions.Golang import Golang from .definitions.html import HTML from .definitions.Java import Java from .definitions.JavaScript import JavaScript from .definitions.JSP import JSP from .definitions.Matlab import Matlab from .definitions.ObjectiveC import ObjectiveC from .definitions.PHP import PHP from .definitions.PLSQL import PLSQL from .definitions.Python import Python from .definitions.Ruby import Ruby from .definitions.Scala import Scala from .definitions.Swift import Swift from .definitions.Vala import Vala # Stop ignoring PyUnusedCodeBear
Add definition into default import
Language: Add definition into default import

Fixes https://github.com/coala/coala/issues/4688
Python
agpl-3.0
coala/coala,SanketDG/coala,shreyans800755/coala,karansingh1559/coala,kartikeys98/coala,kartikeys98/coala,jayvdb/coala,CruiseDevice/coala,Nosferatul/coala,shreyans800755/coala,aptrishu/coala,nemaniarjun/coala,aptrishu/coala,karansingh1559/coala,jayvdb/coala,rimacone/testing2,Asalle/coala,CruiseDevice/coala,shreyans800755/coala,coala-analyzer/coala,coala-analyzer/coala,nemaniarjun/coala,karansingh1559/coala,Asalle/coala,coala/coala,SanketDG/coala,coala-analyzer/coala,SanketDG/coala,rimacone/testing2,CruiseDevice/coala,coala/coala,aptrishu/coala,Nosferatul/coala,kartikeys98/coala,jayvdb/coala,Nosferatul/coala,rimacone/testing2,Asalle/coala,nemaniarjun/coala
""" This directory holds means to get generic information for specific languages. """ # Start ignoring PyUnusedCodeBear from .Language import Language from .Language import Languages from .definitions.Unknown import Unknown from .definitions.C import C from .definitions.CPP import CPP from .definitions.CSharp import CSharp from .definitions.CSS import CSS from .definitions.Fortran import Fortran from .definitions.Golang import Golang from .definitions.html import HTML from .definitions.Java import Java from .definitions.JavaScript import JavaScript from .definitions.JSP import JSP from .definitions.Matlab import Matlab from .definitions.ObjectiveC import ObjectiveC from .definitions.PHP import PHP from .definitions.PLSQL import PLSQL from .definitions.Python import Python from .definitions.Ruby import Ruby from .definitions.Scala import Scala from .definitions.Swift import Swift from .definitions.Vala import Vala # Stop ignoring PyUnusedCodeBear
Language: Add definition into default import

Fixes https://github.com/coala/coala/issues/4688

"""
This directory holds means to get generic information for specific languages.
"""

# Start ignoring PyUnusedCodeBear
from .Language import Language
from .Language import Languages

from .definitions.Unknown import Unknown
from .definitions.C import C
from .definitions.CPP import CPP
from .definitions.CSharp import CSharp
from .definitions.CSS import CSS
from .definitions.Java import Java
from .definitions.JavaScript import JavaScript
from .definitions.Python import Python
from .definitions.Vala import Vala
from .definitions.html import HTML
# Stop ignoring PyUnusedCodeBear
f3803452c669aa35ca71f00c18f613e276a70ca2
scripts/add_users.py
scripts/add_users.py
#!/usr/bin/env python
"""
Add a series of users from a file of JSON objects, one per line.

The JSON user object lines can have the following fields:
{"name": "A. Non", "password": "pass12345", 'emailAddress': "[email protected]", "role": "supplier", "supplierId": 12345}

Usage:
    add-users.py <data_api_endpoint> <data_api_token> <users_path>
"""
from docopt import docopt
from dmutils.apiclient import DataAPIClient
import json


def load_users(users_path):
    with open(users_path) as f:
        for line in f:
            yield json.loads(line)


def update_suppliers(data_api_endpoint, data_api_token, users_path):
    client = DataAPIClient(data_api_endpoint, data_api_token)

    for user in load_users(users_path):
        print("Adding {}".format(user))
        client.create_user(user)


if __name__ == '__main__':
    arguments = docopt(__doc__)
    update_suppliers(
        data_api_endpoint=arguments['<data_api_endpoint>'],
        data_api_token=arguments['<data_api_token>'],
        users_path=arguments['<users_path>'])
#!/usr/bin/env python
"""
Add a series of users from a file of JSON objects, one per line.

The JSON user object lines can have the following fields:
{"name": "A. Non", "password": "pass12345", "emailAddress": "[email protected]", "role": "supplier", "supplierId": 12345}

Usage:
    add-users.py <data_api_endpoint> <data_api_token> <users_path>
"""
from docopt import docopt
from dmutils.apiclient import DataAPIClient
import json


def load_users(users_path):
    with open(users_path) as f:
        for line in f:
            yield json.loads(line)


def update_suppliers(data_api_endpoint, data_api_token, users_path):
    client = DataAPIClient(data_api_endpoint, data_api_token)

    for user in load_users(users_path):
        print("Adding {}".format(user))
        client.create_user(user)


if __name__ == '__main__':
    arguments = docopt(__doc__)
    update_suppliers(
        data_api_endpoint=arguments['<data_api_endpoint>'],
        data_api_token=arguments['<data_api_token>'],
        users_path=arguments['<users_path>'])
Fix example of how to run script, and make it executable
Fix example of how to run script, and make it executable
Python
mit
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
#!/usr/bin/env python
"""
Add a series of users from a file of JSON objects, one per line.

The JSON user object lines can have the following fields:
{"name": "A. Non", "password": "pass12345", "emailAddress": "[email protected]", "role": "supplier", "supplierId": 12345}

Usage:
    add-users.py <data_api_endpoint> <data_api_token> <users_path>
"""
from docopt import docopt
from dmutils.apiclient import DataAPIClient
import json


def load_users(users_path):
    with open(users_path) as f:
        for line in f:
            yield json.loads(line)


def update_suppliers(data_api_endpoint, data_api_token, users_path):
    client = DataAPIClient(data_api_endpoint, data_api_token)

    for user in load_users(users_path):
        print("Adding {}".format(user))
        client.create_user(user)


if __name__ == '__main__':
    arguments = docopt(__doc__)
    update_suppliers(
        data_api_endpoint=arguments['<data_api_endpoint>'],
        data_api_token=arguments['<data_api_token>'],
        users_path=arguments['<users_path>'])
Fix example of how to run script, and make it executable

#!/usr/bin/env python
"""
Add a series of users from a file of JSON objects, one per line.

The JSON user object lines can have the following fields:
{"name": "A. Non", "password": "pass12345", 'emailAddress': "[email protected]", "role": "supplier", "supplierId": 12345}

Usage:
    add-users.py <data_api_endpoint> <data_api_token> <users_path>
"""
from docopt import docopt
from dmutils.apiclient import DataAPIClient
import json


def load_users(users_path):
    with open(users_path) as f:
        for line in f:
            yield json.loads(line)


def update_suppliers(data_api_endpoint, data_api_token, users_path):
    client = DataAPIClient(data_api_endpoint, data_api_token)

    for user in load_users(users_path):
        print("Adding {}".format(user))
        client.create_user(user)


if __name__ == '__main__':
    arguments = docopt(__doc__)
    update_suppliers(
        data_api_endpoint=arguments['<data_api_endpoint>'],
        data_api_token=arguments['<data_api_token>'],
        users_path=arguments['<users_path>'])
19d366141ffedbabc93de487d140333de30e4b7a
rcamp/lib/pam_backend.py
rcamp/lib/pam_backend.py
from django.conf import settings
from accounts.models import (
    RcLdapUser,
    User
)
import pam
import logging


logger = logging.getLogger('accounts')


class PamBackend():

    def authenticate(self, request, username=None, password=None):
        rc_user = RcLdapUser.objects.get_user_from_suffixed_username(username)
        if not rc_user:
            return None

        p = pam.pam()
        authed = p.authenticate(username, password, service=settings.PAM_SERVICES['default'])
        logging.info('User {} auth attempt: {}'.format(username, authed))

        if authed:
            user_dict = {
                'first_name': rc_user.first_name,
                'last_name': rc_user.last_name,
                'email': rc_user.email,
            }
            user, created = User.objects.update_or_create(
                username=username,
                defaults=user_dict
            )
            return user

        return None

    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
from django.conf import settings
from accounts.models import (
    RcLdapUser,
    User
)
import pam
import logging


logger = logging.getLogger('accounts')


class PamBackend():

    def authenticate(self, request, username=None, password=None):
        rc_user = RcLdapUser.objects.get_user_from_suffixed_username(username)
        if not rc_user:
            return None

        logging.info('User {} auth attempt'.format(username))
        p = pam.pam()
        authed = p.authenticate(username, password, service=settings.PAM_SERVICES['default'])
        logging.info('User {} auth attempt status: {}'.format(username, authed))

        if authed:
            user_dict = {
                'first_name': rc_user.first_name,
                'last_name': rc_user.last_name,
                'email': rc_user.email,
            }
            user, created = User.objects.update_or_create(
                username=username,
                defaults=user_dict
            )
            return user

        return None

    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
Add logging to debug hanging auth
Add logging to debug hanging auth
Python
mit
ResearchComputing/RCAMP,ResearchComputing/RCAMP,ResearchComputing/RCAMP,ResearchComputing/RCAMP
from django.conf import settings
from accounts.models import (
    RcLdapUser,
    User
)
import pam
import logging


logger = logging.getLogger('accounts')


class PamBackend():

    def authenticate(self, request, username=None, password=None):
        rc_user = RcLdapUser.objects.get_user_from_suffixed_username(username)
        if not rc_user:
            return None

        logging.info('User {} auth attempt'.format(username))
        p = pam.pam()
        authed = p.authenticate(username, password, service=settings.PAM_SERVICES['default'])
        logging.info('User {} auth attempt status: {}'.format(username, authed))

        if authed:
            user_dict = {
                'first_name': rc_user.first_name,
                'last_name': rc_user.last_name,
                'email': rc_user.email,
            }
            user, created = User.objects.update_or_create(
                username=username,
                defaults=user_dict
            )
            return user

        return None

    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
Add logging to debug hanging auth

from django.conf import settings
from accounts.models import (
    RcLdapUser,
    User
)
import pam
import logging


logger = logging.getLogger('accounts')


class PamBackend():

    def authenticate(self, request, username=None, password=None):
        rc_user = RcLdapUser.objects.get_user_from_suffixed_username(username)
        if not rc_user:
            return None

        p = pam.pam()
        authed = p.authenticate(username, password, service=settings.PAM_SERVICES['default'])
        logging.info('User {} auth attempt: {}'.format(username, authed))

        if authed:
            user_dict = {
                'first_name': rc_user.first_name,
                'last_name': rc_user.last_name,
                'email': rc_user.email,
            }
            user, created = User.objects.update_or_create(
                username=username,
                defaults=user_dict
            )
            return user

        return None

    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None